rapidsai_public_repos/nvgraph/cpp/include/csr_graph.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include "graph.hxx"
#include <cnmem_shared_ptr.hxx> // interface with cnmem (memory pool lib) for shared ptr

namespace nvgraph {

/*! A CsrGraph is a graph stored in a CSR data structure.
 *  It represents an unweighted graph and has storage for row_offsets and column_indices.
 */
template <typename IndexType_>
class CsrGraph : public nvgraph::Graph<IndexType_>
{
public:
    typedef IndexType_ IndexType;

private:
    typedef nvgraph::Graph<IndexType> Parent;

protected:
    /*! Storage for the CUDA stream. */
    cudaStream_t stream_;

    /*! Storage for the row offsets of the CSR data structure. Also called the "row pointer" array. */
    SHARED_PREFIX::shared_ptr<IndexType> row_offsets;

    /*! Storage for the column indices of the CSR data structure. */
    SHARED_PREFIX::shared_ptr<IndexType> column_indices;

public:
    /*! Construct an empty \p CsrGraph. */
    CsrGraph(void) {}

    /*! Destruct an empty \p CsrGraph. */
    ~CsrGraph(void) {}

    /*! Construct a \p CsrGraph with a specific shape and number of nonzero entries.
     *  \param num_rows Number of rows.
     *  \param num_entries Number of nonzero graph entries.
     *  \param stream CUDA stream used for this graph.
     *  \param external If true, no device storage is allocated here; it is attached later via the set_raw_* methods.
     */
    CsrGraph(size_t num_rows, size_t num_entries, cudaStream_t stream, bool external = false)
        : Parent(num_rows, num_entries), stream_(stream)
    {
        if (external)
        {
            row_offsets = nullptr;
            column_indices = nullptr;
        }
        else
        {
            row_offsets = allocateDevice<IndexType>((num_rows + 1), NULL);
            column_indices = allocateDevice<IndexType>(num_entries, NULL);
        }
    }

    /*! Construct a \p CsrGraph from another CSR graph.
     *  \param gr Another graph in CSR format.
     */
    CsrGraph(const CsrGraph& gr)
        : Parent(gr),
          row_offsets(gr.row_offsets),
          column_indices(gr.column_indices)
    {}

    /*! Construct a \p CsrGraph from a base graph.
     *  \param gr Another graph.
     */
    CsrGraph(const Parent& gr)
        : Parent(gr)
        // row_offsets(allocateDevice<IndexType>((gr.get_num_vertices()+1), NULL)),
        // column_indices(allocateDevice<IndexType>(gr.get_num_edges(), NULL))
    {}

    inline void allocate_row_offsets()
    {
        row_offsets = allocateDevice<IndexType>(this->get_num_vertices() + 1, NULL);
    }
    inline void allocate_column_indices()
    {
        column_indices = allocateDevice<IndexType>(this->get_num_edges(), NULL);
    }
    inline IndexType* get_raw_row_offsets() { return row_offsets.get(); }
    inline IndexType* get_raw_column_indices() { return column_indices.get(); }
    inline void set_raw_row_offsets(IndexType* ptr) { row_offsets = attachDevicePtr<IndexType>(ptr, stream_); }
    inline void set_raw_column_indices(IndexType* ptr) { column_indices = attachDevicePtr<IndexType>(ptr, stream_); }
    inline const IndexType* get_raw_row_offsets() const { return row_offsets.get(); }
    inline const IndexType* get_raw_column_indices() const { return column_indices.get(); }
    inline cudaStream_t get_stream() const { return stream_; }

    /*! Resize graph dimensions and underlying storage.
     *  \param num_rows Number of rows.
     *  \param num_entries Number of nonzero graph entries.
     */
    // We should try not to resize CSR graphs in general
    // void resize(const size_t num_rows, const size_t num_entries);

    /*! Swap the contents of two \p CsrGraph objects.
     *  \param graph Another graph in CSR format.
     */
    void swap(CsrGraph& graph);

    /*! Assignment from another graph.
     *  \param graph Another graph in CSR format.
     */
    CsrGraph& operator=(const CsrGraph& graph);

    // Accept method injection
    DEFINE_VISITABLE(IndexType_)
}; // class CsrGraph

} // end namespace nvgraph
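A minimal usage sketch for the class above (not part of the original header): it shows how the external flag and the set_raw_* setters wrap caller-owned device arrays without copying. The pointer names and the wrapper function are illustrative.

#include "csr_graph.hxx"

// Hedged sketch: wrap caller-owned device arrays in a CsrGraph without copying.
// d_offsets and d_indices are hypothetical device pointers managed by the caller.
void wrap_external_csr(int* d_offsets, int* d_indices,
                       size_t num_vertices, size_t num_edges, cudaStream_t stream)
{
    // external = true: the constructor performs no device allocations.
    nvgraph::CsrGraph<int> graph(num_vertices, num_edges, stream, true);
    graph.set_raw_row_offsets(d_offsets);      // attach the (num_vertices + 1)-entry offsets array
    graph.set_raw_column_indices(d_indices);   // attach the num_edges-entry column indices array

    // The raw getters expose the same pointers for kernels or library calls.
    const int* offsets = graph.get_raw_row_offsets();
    (void)offsets;
}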
rapidsai_public_repos/nvgraph/cpp/include/shfl.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sm_utils.h" namespace nvgraph{ __device__ __forceinline__ float shflFPAdd( float input, //Calling thread's input item. int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { float output; // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" " @p add.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(firstLane), "f"(input), "r"(mask)); #else asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.up.b32 r0|p, %1, %2, %3;" " @p add.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(firstLane), "f"(input)); #endif return output; } //incorporate into cusparse and try to remove // Inclusive prefix scan step speciliazed for summation of doubles __device__ __forceinline__ double shflFPAdd( double input, //Calling thread's input item. int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { double output; // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" " mov.b64 r0, {lo, hi};" " }" " @p add.f64 r0, r0, %4;" " mov.f64 %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(firstLane), "d"(input), "r"(mask)); #else asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.up.b32 lo|p, lo, %2, %3;" " shfl.up.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " }" " @p add.f64 r0, r0, %4;" " mov.f64 %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(firstLane), "d"(input)); #endif return output; } __device__ __forceinline__ float shflFPMin( float input, //Calling thread's input item. 
int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { float output; //if (threadIdx.x + blockDim.x*blockIdx.x < 4)device_printf("Thread = %d %f\n", threadIdx.x + blockDim.x*blockIdx.x, input); // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" " @p min.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(firstLane), "f"(input), "r"(mask)); #else asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.up.b32 r0|p, %1, %2, %3;" " @p min.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(firstLane), "f"(input)); #endif return output; } //incorporate into cusparse and try to remove // Inclusive prefix scan step speciliazed for summation of doubles __device__ __forceinline__ double shflFPMin( double input, //Calling thread's input item. int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { double output; // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" " mov.b64 r0, {lo, hi};" " }" " @p min.f64 r0, r0, %4;" " mov.f64 %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(firstLane), "d"(input), "r"(mask)); #else asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.up.b32 lo|p, lo, %2, %3;" " shfl.up.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " }" " @p min.f64 r0, r0, %4;" " mov.f64 %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(firstLane), "d"(input)); #endif return output; } __device__ __forceinline__ float shflFPMax( float input, //Calling thread's input item. int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { float output; //if (threadIdx.x + blockDim.x*blockIdx.x < 4)device_printf("Thread = %d %f\n", threadIdx.x + blockDim.x*blockIdx.x, input); // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" " @p max.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(firstLane), "f"(input), "r"(mask)); #else asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.up.b32 r0|p, %1, %2, %3;" " @p max.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(firstLane), "f"(input)); #endif return output; //return output; } //incorporate into cusparse and try to remove // Inclusive prefix scan step speciliazed for summation of doubles __device__ __forceinline__ double shflFPMax( double input, //Calling thread's input item. 
int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { double output; // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" " mov.b64 r0, {lo, hi};" " }" " @p max.f64 r0, r0, %4;" " mov.f64 %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(firstLane), "d"(input), "r"(mask)); #else asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.up.b32 lo|p, lo, %2, %3;" " shfl.up.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " }" " @p max.f64 r0, r0, %4;" " mov.f64 %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(firstLane), "d"(input)); #endif return output; } __device__ __forceinline__ float shflFPOr( float input, //Calling thread's input item. int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { float output; //if (threadIdx.x + blockDim.x*blockIdx.x < 4)device_printf("Thread = %d %f\n", threadIdx.x + blockDim.x*blockIdx.x, input); // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" " @p or.b32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(firstLane), "f"(input), "r"(mask)); #else asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.up.b32 r0|p, %1, %2, %3;" " @p or.b32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(firstLane), "f"(input)); #endif return output; } __device__ __forceinline__ double shflFPOr( double input, //Calling thread's input item. int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { double output; // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" " mov.b64 r0, {lo, hi};" " }" " @p or.b64 r0, r0, %4;" " mov.f64 %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(firstLane), "d"(input), "r"(mask)); #else asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.up.b32 lo|p, lo, %2, %3;" " shfl.up.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " }" " @p or.b64 r0, r0, %4;" " mov.f64 %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(firstLane), "d"(input)); #endif return output; } //Need to write correct instructions in asm for the operation -log(exp(-x) + exp(-y)) __device__ __forceinline__ float shflFPLog( float input, //Calling thread's input item. 
int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { float output; float expinput = expf(-input); //this must be shuffled and adding float baseChange = log2(expf(1.0)); //for change of base formaula // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" " @p add.f32 r0, r0, %4;" " mov.f32 %0, r0;" " @p lg2.approx.f32 %0, r0;" //convert to natural logarithm!! //add another variable for e in change of base compute log_e(x) = log_2(x) / log_2(e) " @p neg.f32 %0, r0;" "}" : "=f"(output) : "f"(expinput), "r"(offset), "r"(firstLane), "f"(expinput), "r"(mask)); #else asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.up.b32 r0|p, %1, %2, %3;" " @p add.f32 r0, r0, %4;" " mov.f32 %0, r0;" " @p lg2.approx.f32 %0, r0;" //convert to natural logarithm!! //add another variable for e in change of base compute log_e(x) = log_2(x) / log_2(e) " @p neg.f32 %0, r0;" "}" : "=f"(output) : "f"(expinput), "r"(offset), "r"(firstLane), "f"(expinput)); #endif return output; } //check this!! __device__ __forceinline__ double shflFPLog( double input, //Calling thread's input item. int firstLane, //Index of first lane in segment int offset, //Upstream offset to pull from int mask = DEFAULT_MASK) // lane mask for operation { double output; double expinput = exp(-input); double baseChange = log2(exp(1.0));//divide byt his // Use predicate set from SHFL to guard against invalid peers #if USE_CG asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" " mov.b64 r0, {lo, hi};" " }" " @p add.f64 r0, r0, %4;" " mov.f64 %0, r0;" // " @p lg2.approx.f32 %0, r0;" //f64 not supported!! " @p neg.f64 %0, r0;" "}" : "=d"(output) : "d"(expinput), "r"(offset), "r"(firstLane), "d"(expinput), "r"(mask)); #else asm volatile( "{" " .reg .f64 r0;" " .reg .pred p;" " {" " .reg .u32 lo;" " .reg .u32 hi;" " mov.b64 {lo, hi}, %1;" " shfl.up.b32 lo|p, lo, %2, %3;" " shfl.up.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " }" " @p add.f64 r0, r0, %4;" " mov.f64 %0, r0;" // " @p lg2.approx.f32 %0, r0;" //f64 not supported!! " @p neg.f64 %0, r0;" "}" : "=d"(output) : "d"(expinput), "r"(offset), "r"(firstLane), "d"(expinput)); #endif return output; } } //end namespace
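As a hedged illustration of how these shuffle primitives compose (this kernel is not part of the original header), a warp-wide inclusive prefix sum can be built by calling shflFPAdd with doubling offsets; firstLane = 0 treats the whole warp as one segment, and the kernel name and launch shape are illustrative.

#include "shfl.hxx"

// Hedged sketch: warp-level inclusive scan on top of shflFPAdd. Assumes a warp size
// of 32 and that DEFAULT_MASK comes from sm_utils.h, as in the functions above.
__global__ void warp_inclusive_sum_example(const float* in, float* out, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Every lane participates in the shuffles; out-of-range lanes scan a zero.
    float val = (tid < n) ? in[tid] : 0.0f;
    for (int offset = 1; offset < 32; offset *= 2)
    {
        // The predicate set inside shflFPAdd leaves lanes without a valid peer unchanged.
        val = nvgraph::shflFPAdd(val, 0, offset);
    }
    if (tid < n)
        out[tid] = val;   // inclusive prefix sum within this thread's warp
}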
rapidsai_public_repos/nvgraph/cpp/include/pagerank.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

namespace nvgraph {

template <typename IndexType_, typename ValueType_>
class Pagerank
{
public:
    typedef IndexType_ IndexType;
    typedef ValueType_ ValueType;

private:
    ValuedCsrGraph<IndexType, ValueType> m_network;
    Vector<ValueType> m_a;
    Vector<ValueType> m_b;
    Vector<ValueType> m_pagerank;
    Vector<ValueType> m_tmp;
    ValueType m_damping_factor;
    ValueType m_residual;
    ValueType m_tolerance;
    cudaStream_t m_stream;
    int m_iterations;
    int m_max_it;
    bool m_is_setup;
    bool m_has_guess;

    bool solve_it();
    // void update_dangling_nodes(Vector<ValueType_>& dangling_nodes);
    void setup(ValueType damping_factor, Vector<ValueType>& initial_guess, Vector<ValueType>& pagerank_vector);

public:
    // Simple constructor
    Pagerank(void) {};
    // Simple destructor
    ~Pagerank(void) {};

    // Create a Pagerank solver attached to the transpose of a transition matrix.
    // *** network is the transpose of a transition matrix ***
    // dangling_nodes is a vector of size n where dangling_nodes[i] = 1.0 if vertex i is a dangling node and 0.0 otherwise
    Pagerank(const ValuedCsrGraph<IndexType, ValueType>& network, Vector<ValueType>& dangling_nodes, cudaStream_t stream = 0);

    // pagerank_vector is the output
    // void solve(ValueType damping_factor, Vector<ValueType>& dangling_nodes, Vector<ValueType>& pagerank_vector);

    // Solve with an initial guess of the pagerank
    NVGRAPH_ERROR solve(ValueType damping_factor, Vector<ValueType>& initial_guess, Vector<ValueType>& pagerank_vector,
                        float tolerance = 1.0E-6, int max_it = 500);

    inline ValueType get_residual() const { return m_residual; }
    inline int get_iterations() const { return m_iterations; }

    // init:
    //   We need the transpose (= converse = reverse) as input (this can be seen as a CSC matrix that we treat as CSR)
    //   b is a constant and uniform vector, b = 1.0/num_vertices
    //   a is a constant vector that initially stores the dangling nodes; then we set a = alpha*a + (1-alpha)e
    //   pagerank is 0
    //   tmp is random (1/n is fine)
    //   alpha is a constant scalar (0.85 usually)
    // loop:
    //   pagerank = csrmv(network, tmp)
    //   scal(pagerank, alpha);                // pagerank = alpha*pagerank
    //   gamma = dot(a, tmp);                  // gamma = a*tmp
    //   pagerank = axpy(b, pagerank, gamma);  // pagerank = pagerank + gamma*b
    //   convergence check:
    //     tmp = axpby(pagerank, tmp, -1, 1);  // tmp = pagerank - tmp
    //     residual_norm = norm(tmp);
    //     if converged(residual_norm)
    //       l1 = l1_norm(pagerank);
    //       pagerank = scal(pagerank, 1/l1);
    //       return pagerank
    //   swap(tmp, pagerank)
    // end loop
};

} // end namespace nvgraph
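The comment block above outlines the power-iteration loop; the following host-side sketch of driving the solver is illustrative only. The Vector constructor arguments and fill_value() helper are assumptions standing in for whatever nvgraph_vector.hxx actually provides, and NVGRAPH_OK is assumed to come from nvgraph_error.hxx.

#include <cstdio>
#include "pagerank.hxx"
#include "valued_csr_graph.hxx"
#include "nvgraph_vector.hxx"

// Hedged sketch: 'network' must already hold the transpose of the transition matrix,
// and 'dangling' marks dangling vertices with 1.0, as described above.
float run_pagerank_example(nvgraph::ValuedCsrGraph<int, float>& network,
                           nvgraph::Vector<float>& dangling,
                           size_t num_vertices, cudaStream_t stream)
{
    nvgraph::Pagerank<int, float> solver(network, dangling, stream);
    nvgraph::Vector<float> guess(num_vertices, stream);     // hypothetical ctor args
    nvgraph::Vector<float> pagerank(num_vertices, stream);  // hypothetical ctor args
    guess.fill_value(1.0f / static_cast<float>(num_vertices));  // hypothetical helper: uniform 1/n start

    NVGRAPH_ERROR rc = solver.solve(0.85f, guess, pagerank, 1.0e-6f, 500);
    if (rc == NVGRAPH_OK)   // NVGRAPH_OK assumed from nvgraph_error.hxx
        std::printf("PageRank converged in %d iterations (residual %g)\n",
                    solver.get_iterations(), static_cast<double>(solver.get_residual()));
    return solver.get_residual();
}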
rapidsai_public_repos/nvgraph/cpp/include/partition.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include "nvgraph_error.hxx"
#include "valued_csr_graph.hxx"
#include "matrix.hxx"

namespace nvgraph {

#define SPECTRAL_USE_COLORING true
#define SPECTRAL_USE_LOBPCG true
#define SPECTRAL_USE_PRECONDITIONING true
#define SPECTRAL_USE_SCALING_OF_EIGVECS false
#define SPECTRAL_USE_MAGMA false
#define SPECTRAL_USE_THROTTLE true
#define SPECTRAL_USE_NORMALIZED_LAPLACIAN true
#define SPECTRAL_USE_R_ORTHOGONALIZATION false

/// Spectral graph partition
/** Compute partition for a weighted undirected graph. This
 *  partition attempts to minimize the cost function:
 *    Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition)
 *
 *  @param G Weighted graph in CSR format
 *  @param nParts Number of partitions.
 *  @param nEigVecs Number of eigenvectors to compute.
 *  @param maxIter_lanczos Maximum number of Lanczos iterations.
 *  @param restartIter_lanczos Maximum size of Lanczos system before implicit restart.
 *  @param tol_lanczos Convergence tolerance for Lanczos method.
 *  @param maxIter_kmeans Maximum number of k-means iterations.
 *  @param tol_kmeans Convergence tolerance for k-means algorithm.
 *  @param parts (Output, device memory, n entries) Partition assignments.
 *  @param iters_lanczos On exit, number of Lanczos iterations performed.
 *  @param iters_kmeans On exit, number of k-means iterations performed.
 *  @return NVGRAPH error flag.
 */
template <typename IndexType_, typename ValueType_>
NVGRAPH_ERROR partition(ValuedCsrGraph<IndexType_, ValueType_>& G,
                        IndexType_ nParts,
                        IndexType_ nEigVecs,
                        IndexType_ maxIter_lanczos,
                        IndexType_ restartIter_lanczos,
                        ValueType_ tol_lanczos,
                        IndexType_ maxIter_kmeans,
                        ValueType_ tol_kmeans,
                        IndexType_ * __restrict__ parts,
                        Vector<ValueType_>& eigVals,
                        Vector<ValueType_>& eigVecs,
                        IndexType_& iters_lanczos,
                        IndexType_& iters_kmeans);

template <typename IndexType_, typename ValueType_>
NVGRAPH_ERROR partition_lobpcg(ValuedCsrGraph<IndexType_, ValueType_>& G,
                               Matrix<IndexType_, ValueType_>* M,
                               cusolverDnHandle_t cusolverHandle,
                               IndexType_ nParts,
                               IndexType_ nEigVecs,
                               IndexType_ maxIter_lanczos,
                               ValueType_ tol_lanczos,
                               IndexType_ maxIter_kmeans,
                               ValueType_ tol_kmeans,
                               IndexType_ * __restrict__ parts,
                               Vector<ValueType_>& eigVals,
                               Vector<ValueType_>& eigVecs,
                               IndexType_& iters_lanczos,
                               IndexType_& iters_kmeans);

/// Compute cost function for partition
/** This function determines the edges cut by a partition and a cost function:
 *    Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition)
 *  Graph is assumed to be weighted and undirected.
 *
 *  @param G Weighted graph in CSR format
 *  @param nParts Number of partitions.
 *  @param parts (Input, device memory, n entries) Partition assignments.
 *  @param edgeCut On exit, weight of edges cut by partition.
 *  @param cost On exit, partition cost function.
 *  @return NVGRAPH error flag.
 */
template <typename IndexType_, typename ValueType_>
NVGRAPH_ERROR analyzePartition(ValuedCsrGraph<IndexType_, ValueType_>& G,
                               IndexType_ nParts,
                               const IndexType_ * __restrict__ parts,
                               ValueType_& edgeCut,
                               ValueType_& cost);

} // end namespace nvgraph
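A hedged sketch of calling the spectral partitioner declared above, shown for a two-way split; the iteration limits, tolerances, and the Vector constructor arguments are illustrative assumptions rather than recommended settings.

#include "partition.hxx"

// Hedged sketch: two-way spectral partition followed by the cost analysis.
// 'G' is a weighted undirected graph already held as a ValuedCsrGraph; n is its vertex count.
NVGRAPH_ERROR partition_example(nvgraph::ValuedCsrGraph<int, float>& G,
                                size_t n, cudaStream_t stream)
{
    int* parts = nullptr;
    cudaMalloc(&parts, n * sizeof(int));               // one partition label per vertex (device memory)
    nvgraph::Vector<float> eigVals(2, stream);         // hypothetical ctor args
    nvgraph::Vector<float> eigVecs(2 * n, stream);     // hypothetical ctor args
    int iters_lanczos = 0, iters_kmeans = 0;

    NVGRAPH_ERROR rc = nvgraph::partition<int, float>(
        G,
        2,      // nParts
        2,      // nEigVecs
        1000,   // maxIter_lanczos (illustrative)
        100,    // restartIter_lanczos (illustrative)
        1e-4f,  // tol_lanczos (illustrative)
        100,    // maxIter_kmeans (illustrative)
        1e-4f,  // tol_kmeans (illustrative)
        parts, eigVals, eigVecs, iters_lanczos, iters_kmeans);

    if (rc == NVGRAPH_OK)   // NVGRAPH_OK assumed from nvgraph_error.hxx
    {
        float edgeCut = 0.0f, cost = 0.0f;
        rc = nvgraph::analyzePartition<int, float>(G, 2, parts, edgeCut, cost);
    }
    cudaFree(parts);
    return rc;
}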
rapidsai_public_repos/nvgraph/cpp/include/2d_partitioning.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * 2d_partitioning.h * * Created on: Apr 9, 2018 * Author: jwyles */ #pragma once #include <stdint.h> #include <algorithm> #include <vector> #include <string> #include <sstream> #include <multi_valued_csr_graph.hxx> #include <nvgraph_vector.hxx> #include <cub/device/device_radix_sort.cuh> #include <cub/device/device_run_length_encode.cuh> #include <thrust/extrema.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/tuple.h> #include <thrust/execution_policy.h> #include <thrust/transform.h> namespace nvgraph { template<typename T, typename W> struct CSR_Result_Weighted { int64_t size; int64_t nnz; T* rowOffsets; T* colIndices; W* edgeWeights; CSR_Result_Weighted() : size(0), nnz(0), rowOffsets(NULL), colIndices(NULL), edgeWeights(NULL) { } void Destroy() { if (rowOffsets) cudaFree(rowOffsets); if (colIndices) cudaFree(colIndices); if (edgeWeights) cudaFree(edgeWeights); } }; // Define kernel for copying run length encoded values into offset slots. template<typename T> __global__ void offsetsKernel(T runCounts, T* unique, T* counts, T* offsets) { for (int32_t idx = blockDim.x * blockIdx.x + threadIdx.x; idx < runCounts; idx += gridDim.x * blockDim.x) { offsets[unique[idx]] = counts[idx]; } } /** * Method for converting COO to CSR format * @param sources The array of source indices * @param destinations The array of destination indices * @param edgeWeights The array of edge weights * @param nnz The number of non zero values * @param maxId The largest id contained in the matrix * @param result The result is stored here. 
*/ template<typename T, typename W> void ConvertCOOtoCSR_weighted(T* sources, T* destinations, W* edgeWeights, int64_t nnz, T maxId, CSR_Result_Weighted<T, W>& result) { // Sort source and destination columns by source // Allocate local memory for operating on T* srcs, *dests; W* weights = NULL; cudaMalloc(&srcs, sizeof(T) * nnz); cudaMalloc(&dests, sizeof(T) * nnz); if (edgeWeights) cudaMalloc(&weights, sizeof(W) * nnz); cudaMemcpy(srcs, sources, sizeof(T) * nnz, cudaMemcpyDefault); cudaMemcpy(dests, destinations, sizeof(T) * nnz, cudaMemcpyDefault); if (edgeWeights) cudaMemcpy(weights, edgeWeights, sizeof(W) * nnz, cudaMemcpyDefault); // Call Thrust::sort_by_key to sort the arrays with srcs as keys: if (edgeWeights) thrust::sort_by_key(thrust::device, srcs, srcs + nnz, thrust::make_zip_iterator(thrust::make_tuple(dests, weights))); else thrust::sort_by_key(thrust::device, srcs, srcs + nnz, dests); result.size = maxId + 1; // Allocate offsets array cudaMalloc(&result.rowOffsets, (maxId + 2) * sizeof(T)); // Set all values in offsets array to zeros cudaMemset(result.rowOffsets, 0, (maxId + 2) * sizeof(T)); // Allocate temporary arrays same size as sources array, and single value to get run counts T* unique, *counts, *runCount; cudaMalloc(&unique, (maxId + 1) * sizeof(T)); cudaMalloc(&counts, (maxId + 1) * sizeof(T)); cudaMalloc(&runCount, sizeof(T)); // Use CUB run length encoding to get unique values and run lengths void *tmpStorage = NULL; size_t tmpBytes = 0; cub::DeviceRunLengthEncode::Encode(tmpStorage, tmpBytes, srcs, unique, counts, runCount, nnz); cudaMalloc(&tmpStorage, tmpBytes); cub::DeviceRunLengthEncode::Encode(tmpStorage, tmpBytes, srcs, unique, counts, runCount, nnz); cudaFree(tmpStorage); // Set offsets to run sizes for each index T runCount_h; cudaMemcpy(&runCount_h, runCount, sizeof(T), cudaMemcpyDefault); int threadsPerBlock = 1024; int numBlocks = min(65535, (runCount_h + threadsPerBlock - 1) / threadsPerBlock); offsetsKernel<<<numBlocks, threadsPerBlock>>>(runCount_h, unique, counts, result.rowOffsets); // Scan offsets to get final offsets thrust::exclusive_scan(thrust::device, result.rowOffsets, result.rowOffsets + maxId + 2, result.rowOffsets); // Clean up temporary allocations result.nnz = nnz; result.colIndices = dests; result.edgeWeights = weights; cudaFree(srcs); cudaFree(unique); cudaFree(counts); cudaFree(runCount); } /** * Describes the 2D decomposition of a partitioned matrix. */ template<typename GlobalType, typename LocalType> class MatrixDecompositionDescription { protected: GlobalType numRows; // Global number of rows in matrix GlobalType numCols; // Global number of columns in matrix GlobalType nnz; // Global number of non-zeroes in matrix GlobalType blockRows; // Number of rows of blocks in the decomposition GlobalType blockCols; // Number of columns of rows in the decomposition LocalType offset; // Offsets-like arrays for rows and columns defining the start/end of the // sections of the global id space belonging to each row and column. std::vector<GlobalType> rowOffsets; std::vector<GlobalType> colOffsets; // Array of integers one for each block, defining the device it is assigned to std::vector<int32_t> deviceAssignments; std::vector<cudaStream_t> blockStreams; public: MatrixDecompositionDescription() : numRows(0), numCols(0), nnz(0), blockRows(0), blockCols(0) { rowOffsets.push_back(0); colOffsets.push_back(0); deviceAssignments.push_back(0); } // Basic constructor, just takes in the values of its members. 
MatrixDecompositionDescription(GlobalType numRows, GlobalType numCols, GlobalType nnz, GlobalType blockRows, GlobalType blockCols, std::vector<GlobalType> rowOffsets, std::vector<GlobalType> colOffsets, std::vector<int32_t> deviceAssignments) : numRows(numRows), numCols(numCols), nnz(nnz), blockRows(blockRows), blockCols(blockCols), rowOffsets(rowOffsets), colOffsets(colOffsets), deviceAssignments(deviceAssignments) { } // Constructs a MatrixDecompositionDescription for a square matrix given the // number of rows in the matrix and number of rows of blocks. MatrixDecompositionDescription(GlobalType numRows, GlobalType numBlockRows, GlobalType nnz, std::vector<int32_t> devices) : numRows(numRows), numCols(numRows), blockRows(numBlockRows), blockCols(numBlockRows), nnz(nnz) { // Tracking the current set device to change back int currentDevice; cudaGetDevice(&currentDevice); // Setting up the row and col offsets into equally sized chunks GlobalType remainder = numRows % blockRows; if (remainder != 0) offset = (numRows + blockRows - remainder) / blockRows; else offset = numRows / blockRows; rowOffsets.resize(blockRows + 1); colOffsets.resize(blockRows + 1); for (int i = 0; i < blockRows; i++) { rowOffsets[i] = i * offset; colOffsets[i] = i * offset; } rowOffsets.back() = blockRows * offset; colOffsets.back() = blockCols * offset; // Setting up the device assignments using the given device ids and also // setting up the stream associated with each block. deviceAssignments.resize(getNumBlocks()); blockStreams.resize(getNumBlocks()); for (int i = 0; i < getNumBlocks(); i++) { int device = devices[i % devices.size()]; deviceAssignments[i] = device; cudaSetDevice(device); cudaStream_t stream; cudaStreamCreate(&stream); blockStreams[i] = stream; } // Restoring to current device when called cudaSetDevice(currentDevice); } // Gets the row id for the block containing the given global row id int32_t getRowId(GlobalType val) const { return std::upper_bound(rowOffsets.begin(), rowOffsets.end(), val) - rowOffsets.begin() - 1; } // Gets the column id for the block containing the given global column id int32_t getColId(GlobalType val) const { return std::upper_bound(colOffsets.begin(), colOffsets.end(), val) - colOffsets.begin() - 1; } // Gets the number of blocks in the decomposition: int32_t getNumBlocks() const { return blockRows * blockCols; } // Getter for offset LocalType getOffset() const { return offset; } // Getter for deviceAssignments const std::vector<int32_t>& getDeviceAssignments() const { return deviceAssignments; } /** * Getter for vector of streams for each block. * @return Reference to vector of streams for each block */ const std::vector<cudaStream_t>& getBlockStreams() const { return blockStreams; } /** * Getter for nnz * @return The global number of non-zero elements */ GlobalType getNnz() const { return nnz; } /** * Getter method for numRows * @return The number of global rows in the matrix */ GlobalType getNumRows() const { return numRows; } /** * Getter for BlockRows * @return The number of blocks in a row in the decomposition. */ GlobalType getBlockRows() const { return blockRows; } /** * Getter for BlockCols * @return The number of blocks in a column in the decomposition. */ GlobalType getBlockCols() const { return blockCols; } /** * Given a block id, returns the row which that block is in. * @param bId The block ID * @return The row number */ int32_t getBlockRow(int32_t bId) const { return bId / blockCols; } /** * Given a block id, returns the column which that block is in. 
* @param bId The block ID * @return The column number */ int32_t getBlockCol(int32_t bId) const { return bId % blockCols; } /** * Takes a COO global row and produces the COO local row and the block to which it belongs. * @param globalRow The global row ID * @param globalCol The global column ID * @param localRow The block local row ID (return) * @param localCol The block local column ID (return) * @param blockId The block ID (return) */ void convertGlobaltoLocalRow(GlobalType globalRow, GlobalType globalCol, LocalType& localRow, LocalType& localCol, int32_t& blockId) const { int32_t rowId = getRowId(globalRow); int32_t colId = getColId(globalCol); blockId = rowId * blockCols + colId; localRow = globalRow - rowOffsets[rowId]; localCol = globalCol - colOffsets[colId]; } /** * Takes in a row ID and column ID and returns the corresponding block ID * @param rowId The row ID * @param colId The column ID * @return The ID of the corresponding block */ int32_t getBlockId(int32_t rowId, int32_t colId) const { return rowId * blockCols + colId; } /** * Helper method to synchronize all streams after operations are issued. */ void syncAllStreams() const { int32_t numBlocks = getNumBlocks(); int32_t current_device; cudaGetDevice(&current_device); for (int32_t i = 0; i < numBlocks; i++) { cudaSetDevice(deviceAssignments[i]); cudaStreamSynchronize(blockStreams[i]); } cudaSetDevice(current_device); } /** * This method is only for testing and debugging use. * @return A human readable string representation of the object */ std::string toString() const { std::stringstream ss; ss << "Global Info:\n\tnumRows: " << numRows << ", numCols: " << numCols << ", nnz: " << nnz; ss << "\n"; ss << "Block Info:\n\tblockRows: " << blockRows << ", blockCols: " << blockCols; ss << "\n"; ss << "rowOffsets: ["; for (int i = 0; i < (int) rowOffsets.size(); i++) ss << rowOffsets[i] << (i == (int) rowOffsets.size() - 1 ? "]\n" : ", "); ss << "colOffsets: ["; for (int i = 0; i < (int) colOffsets.size(); i++) ss << colOffsets[i] << (i == (int) colOffsets.size() - 1 ? "]\n" : ", "); ss << "deviceAssignments: ["; for (int i = 0; i < (int) deviceAssignments.size(); i++) ss << deviceAssignments[i] << (i == (int) deviceAssignments.size() - 1 ? 
"]\n" : ", "); return ss.str(); } }; template<typename GlobalType, typename LocalType, typename ValueType> class Matrix2d { protected: // Description of the matrix decomposition MatrixDecompositionDescription<GlobalType, LocalType> description; // Array of block matrices forming the decomposition std::vector<MultiValuedCsrGraph<LocalType, ValueType>*> blocks; public: Matrix2d() { } Matrix2d(MatrixDecompositionDescription<GlobalType, LocalType> descr, std::vector<MultiValuedCsrGraph<LocalType, ValueType>*> blocks) : description(descr), blocks(blocks) { } const MatrixDecompositionDescription<GlobalType, LocalType>& getMatrixDecompositionDescription() { return description; } MultiValuedCsrGraph<LocalType, ValueType>* getBlockMatrix(int32_t bId) { return blocks[bId]; } std::string toString() { std::stringstream ss; ss << "MatrixDecompositionDescription:\n" << description.toString(); for (int i = 0; i < (int) blocks.size(); i++) { ss << "Block " << i << ":\n"; size_t numVerts = blocks[i]->get_num_vertices(); size_t numEdges = blocks[i]->get_num_edges(); size_t numValues = blocks[i]->getNumValues(); ss << "numVerts: " << numVerts << ", numEdges: " << numEdges << "\n"; LocalType* rowOffsets = (LocalType*) malloc((numVerts + 1) * sizeof(LocalType)); LocalType* colIndices = (LocalType*) malloc(numEdges * sizeof(LocalType)); ValueType* values = NULL; if (numValues > 0) values = (ValueType*) malloc(numEdges * sizeof(ValueType)); cudaMemcpy(rowOffsets, blocks[i]->get_raw_row_offsets(), (numVerts + 1) * sizeof(LocalType), cudaMemcpyDefault); cudaMemcpy(colIndices, blocks[i]->get_raw_column_indices(), numEdges * sizeof(LocalType), cudaMemcpyDefault); if (values) cudaMemcpy(values, blocks[i]->get_raw_edge_dim(0), numEdges * sizeof(ValueType), cudaMemcpyDefault); int idxCount = numEdges >= (numVerts + 1) ? numEdges : (numVerts + 1); ss << "Idx\tOffset\tColInd\tValue\n"; for (int j = 0; j < idxCount; j++) { if (j < (int) numVerts + 1 && j < (int) numEdges) ss << j << ":\t" << rowOffsets[j] << "\t" << colIndices[j] << "\t" << (values ? values[j] : 0) << "\n"; else if (j < (int) numVerts + 1 && j >= (int) numEdges) ss << j << ":\t" << rowOffsets[j] << "\n"; else if (j >= (int) numVerts + 1 && j < (int) numEdges) ss << j << ":\t" << "\t" << colIndices[j] << "\t" << (values ? values[j] : 0) << "\n"; } free(rowOffsets); free(colIndices); free(values); } return ss.str(); } }; template<typename GlobalType, typename LocalType, typename ValueType> class VertexData2D { const MatrixDecompositionDescription<GlobalType, LocalType>* description; int32_t n; std::vector<cub::DoubleBuffer<ValueType> > values; public: /** * Creates a VertexData2D object given a pointer to a MatrixDecompositionDescription * object which describes the matrix the data is attached to. Data buffers are * allocated for each block using the offset from the description to size the * buffers, and to locate the buffers on the same GPU as the matrix block. 
*/ VertexData2D(const MatrixDecompositionDescription<GlobalType, LocalType>* descr) : description(descr) { // Resize the values array to be the same size as number of blocks values.resize(descr->getNumBlocks()); // Grab the current device id to switch back after allocations are done int current_device; cudaGetDevice(&current_device); LocalType allocSize = descr->getOffset(); n = allocSize; // Allocate the data for each block for (size_t i = 0; i < descr->getDeviceAssignments().size(); i++) { int device = descr->getDeviceAssignments()[i]; cudaSetDevice(device); ValueType* d_current, *d_alternate; cudaMalloc(&d_current, sizeof(ValueType) * n); cudaMalloc(&d_alternate, sizeof(ValueType) * n); values[i].d_buffers[0] = d_current; values[i].d_buffers[1] = d_alternate; } // Set the device back to what it was initially cudaSetDevice(current_device); } /** * Creates a VertexData2D object given a pointer to a MatrixDecompositionDescription * object, which describes the matrix the data is attached to, and an integer which indicates * how many data elements should be allocated for each block. Data buffers are allocated * for each block using the offset from the description to size the buffers, and to locate * the buffers on the same GPU as the matrix block. */ VertexData2D(const MatrixDecompositionDescription<GlobalType, LocalType>* descr, size_t _n) : description(descr) { // Resize the values array to be the same size as number of blocks values.resize(descr->getNumBlocks()); // Grab the current device id to switch back after allocations are done int current_device; cudaGetDevice(&current_device); LocalType allocSize = _n; n = allocSize; // Allocate the data for each block for (size_t i = 0; i < descr->getDeviceAssignments().size(); i++) { int device = descr->getDeviceAssignments()[i]; cudaSetDevice(device); ValueType* d_current, *d_alternate; cudaMalloc(&d_current, sizeof(ValueType) * n); cudaMalloc(&d_alternate, sizeof(ValueType) * n); values[i].d_buffers[0] = d_current; values[i].d_buffers[1] = d_alternate; } // Set the device back to what it was initially cudaSetDevice(current_device); } ~VertexData2D() { for (size_t i = 0; i < values.size(); i++) { if (values[i].Current()) cudaFree(values[i].Current()); if (values[i].Alternate()) cudaFree(values[i].Alternate()); } } /** * Getter for n the size of each block's allocation in elements. * @return The value of n */ int32_t getN() { return n; } /** * Getter for the MatrixDecompositionDescription associated with this VertexData2D * @return Pointer to the MatrixDecompositionDescription for this VertexData2D */ const MatrixDecompositionDescription<GlobalType, LocalType>* getDescription() { return description; } /** * Gets the current buffer corresponding to the given block ID */ ValueType* getCurrent(int bId) { return values[bId].Current(); } /** * Gets the alternate buffer corresponding to the given block ID */ ValueType* getAlternate(int bId) { return values[bId].Alternate(); } /** * Swaps the current and alternate buffers for all block IDs */ void swapBuffers() { for (size_t i = 0; i < values.size(); i++) values[i].selector ^= 1; } /** * Sets an element in the global array, assuming that the data is currently * valid and in the diagonal blocks. After calling this method either columnScatter * or rowScatter should be called to propagate the change to all blocks. 
*/ void setElement(GlobalType globalIndex, ValueType val) { LocalType blockId = globalIndex / n; LocalType blockOffset = globalIndex % n; int32_t bId = description->getBlockId(blockId, blockId); ValueType* copyTo = values[bId].Current() + blockOffset; cudaMemcpy(copyTo, &val, sizeof(ValueType), cudaMemcpyDefault); } /** * Sets the elements of the global array, using the provided array of values. The values * are set in the blocks of the diagonal, columnScatter or rowScatter should be called * to propogate to all blocks. * @param vals Pointer to an array with the values to be set. */ void setElements(ValueType* vals) { LocalType offset = description->getOffset(); int32_t numRows = description->getBlockRows(); for (int i = 0; i < numRows; i++) { int32_t id = description->getBlockId(i, i); cudaStream_t stream = description->getBlockStreams()[id]; ValueType* copyFrom = vals + i * n; ValueType* copyTo = values[id].Current(); cudaMemcpyAsync(copyTo, copyFrom, sizeof(ValueType) * n, cudaMemcpyDefault, stream); } description->syncAllStreams(); } /** * Fills the elements of the data array with the given value. * The elements on the diagonal are filled with the given value. After filling, * either rowScatter or columnScatter will copy the values across the blocks in * either the rows or columns depending on the use. * @param val The value to fill the array with */ void fillElements(ValueType val) { int current_device; cudaGetDevice(&current_device); int32_t numRows = description->getBlockRows(); for (int32_t i = 0; i < numRows; i++) { int32_t blockId = description->getBlockId(i, i); ValueType* vals = getCurrent(blockId); int deviceId = description->getDeviceAssignments()[blockId]; cudaStream_t stream = description->getBlockStreams()[blockId]; cudaSetDevice(deviceId); thrust::fill(thrust::cuda::par.on(stream), vals, vals + n, val); } description->syncAllStreams(); cudaSetDevice(current_device); } /** * Copies the values of the diagonal blocks in this VertexData2D into the * VertexData2D specified. * @param other Pointer to the VertexData2D to copy into */ void copyTo(VertexData2D<GlobalType, LocalType, ValueType>* other) { const MatrixDecompositionDescription<GlobalType, LocalType>* otherDescr = other->getDescription(); // Do a quick check that the sizes of both block arrays are the same. if (description->getBlockRows() == otherDescr->getBlockRows() && n == other->getN()) { // Issue asynchronous copies for each block's data for (int i = 0; i < description->getBlockRows(); i++) { int32_t bId = description->getBlockId(i, i); ValueType* copyFrom = getCurrent(bId); ValueType* copyTo = other->getCurrent(bId); cudaStream_t stream = description->getBlockStreams()[bId]; cudaMemcpyAsync(copyTo, copyFrom, n * sizeof(ValueType), cudaMemcpyDefault, stream); } // Synchronize the streams after the copies are done for (int i = 0; i < description->getBlockRows(); i++) { int32_t bId = description->getBlockId(i, i); cudaStream_t stream = description->getBlockStreams()[bId]; cudaStreamSynchronize(stream); } } } /** * This method implements a row-wise reduction of each blocks data into a * single array for each row. The block on the diagonal will have the result. 
*/ template<typename Operator> void rowReduce() { int current_device; cudaGetDevice(&current_device); Operator op; // For each row in the decomposition: int32_t numRows = description->getBlockRows(); std::vector<int32_t> blockIds; for (int32_t i = 0; i < numRows; i++) { // Put all the block ids for the row into a vector, with the ID of the diagonal block // at index 0. std::vector<int32_t> blockIds; blockIds.push_back(-1); for (int32_t j = 0; j < numRows; j++) { if (i == j) { blockIds[0] = description->getBlockId(i, j); } else { blockIds.push_back(description->getBlockId(i, j)); } } // Do a binary tree reduction. At each step the primary buffer of the sender is // copied into the secondary buffer of the receiver. After the copy is done // each receiver performs the reduction operator and stores the result in it's // primary buffer. for (int32_t j = 2; (j / 2) < numRows; j *= 2) { for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id] is the receiver int32_t receiverId = blockIds[id]; // blockIds[id + j/2] is the sender int32_t senderId = blockIds[id + j / 2]; // Get the stream associated with the receiver's block id cudaStream_t stream = description->getBlockStreams()[receiverId]; // Copy from the sender to the receiver (use stream associated with receiver) cudaMemcpyAsync(values[receiverId].Alternate(), values[senderId].Current(), sizeof(ValueType) * n, cudaMemcpyDefault, stream); // Invoke the reduction operator on the receiver's GPU and values arrays. cudaSetDevice(description->getDeviceAssignments()[receiverId]); ValueType* input1 = values[receiverId].Alternate(); ValueType* input2 = values[receiverId].Current(); thrust::transform(thrust::cuda::par.on(stream), input1, input1 + n, input2, input2, op); } } // Sync all active streams before next step for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id] is the receiver int32_t receiverId = blockIds[id]; // Set the device to the receiver and sync the stream cudaSetDevice(description->getDeviceAssignments()[receiverId]); cudaStreamSynchronize(description->getBlockStreams()[receiverId]); } } } } cudaSetDevice(current_device); } /** * This method implements a column-wise reduction of each blocks data into a * single array for each column. The block on the diagonal will have the result. */ template<typename Operator> void columnReduce() { int current_device; cudaGetDevice(&current_device); Operator op; // For each column in the decomposition: int32_t numRows = description->getBlockRows(); std::vector<int32_t> blockIds; for (int32_t i = 0; i < numRows; i++) { // Put all the block ids for the row into a vector, with the ID of the diagonal block // at index 0. std::vector<int32_t> blockIds; blockIds.push_back(-1); for (int32_t j = 0; j < numRows; j++) { if (i == j) { blockIds[0] = description->getBlockId(j, i); } else { blockIds.push_back(description->getBlockId(j, i)); } } // Do a binary tree reduction. At each step the primary buffer of the sender is // copied into the secondary buffer of the receiver. After the copy is done // each receiver performs the reduction operator and stores the result in it's // primary buffer. 
for (int32_t j = 2; (j / 2) < numRows; j *= 2) { for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id] is the receiver int32_t receiverId = blockIds[id]; // blockIds[id + j/2] is the sender int32_t senderId = blockIds[id + j / 2]; // Get the stream associated with the receiver's block id cudaStream_t stream = description->getBlockStreams()[receiverId]; // Copy from the sender to the receiver (use stream associated with receiver) cudaMemcpyAsync(values[receiverId].Alternate(), values[senderId].Current(), sizeof(ValueType) * n, cudaMemcpyDefault, stream); // Invoke the reduction operator on the receiver's GPU and values arrays. cudaSetDevice(description->getDeviceAssignments()[receiverId]); ValueType* input1 = values[receiverId].Alternate(); ValueType* input2 = values[receiverId].Current(); thrust::transform(thrust::cuda::par.on(stream), input1, input1 + n, input2, input2, op); } } // Sync all active streams before next step for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id] is the receiver int32_t receiverId = blockIds[id]; // Set the device to the receiver and sync the stream cudaSetDevice(description->getDeviceAssignments()[receiverId]); cudaStreamSynchronize(description->getBlockStreams()[receiverId]); } } } } cudaSetDevice(current_device); } /** * This implements a column-wise scatter of the global data from the corresponding * row. i.e. The data reduced from row 1 is broadcast to all blocks in * column 1. It is assumed that the data to broadcast is located in the block on * the diagonal. */ void columnScatter() { int current_device; cudaGetDevice(&current_device); // For each column in the decomposition: int32_t numRows = description->getBlockRows(); std::vector<int32_t> blockIds; for (int32_t i = 0; i < numRows; i++) { // Put all the block ids for the column into a vector, with the ID of the diagonal block // at index 0. std::vector<int32_t> blockIds; blockIds.push_back(-1); for (int32_t j = 0; j < numRows; j++) { if (i == j) { blockIds[0] = description->getBlockId(j, i); } else { blockIds.push_back(description->getBlockId(j, i)); } } // Do a binary tree scatter. At each step the primary buffer of the sender is // copied into the primary buffer of the receiver. int32_t max2pow = 2; while (max2pow < numRows) { max2pow *= 2; } for (int32_t j = max2pow; j >= 2; j /= 2) { for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id] is the sender int32_t senderId = blockIds[id]; // blockIds[id + j/2] is the sender int32_t receiverId = blockIds[id + j / 2]; // Get the stream associated with the receiver's block id cudaStream_t stream = description->getBlockStreams()[receiverId]; // Copy from the sender to the receiver (use stream associated with receiver) cudaMemcpyAsync(values[receiverId].Current(), values[senderId].Current(), sizeof(ValueType) * n, cudaMemcpyDefault, stream); } } // Synchronize all the active streams before next step. for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id + j/2] is the sender int32_t receiverId = blockIds[id + j / 2]; // Set device and sync receiver's stream cudaSetDevice(description->getDeviceAssignments()[receiverId]); cudaStreamSynchronize(description->getBlockStreams()[receiverId]); } } } } cudaSetDevice(current_device); } /** * This implements a row-wise scatter of the global data from the corresponding * column. i.e. 
The data reduced from column 1 is broadcast to all blocks in * row 1. It is assumed that the data to broadcast is located in the block on * the diagonal. */ void rowScatter() { int current_device; cudaGetDevice(&current_device); // For each row in the decomposition: int32_t numRows = description->getBlockRows(); std::vector<int32_t> blockIds; for (int32_t i = 0; i < numRows; i++) { // Put all the block ids for the column into a vector, with the ID of the diagonal block // at index 0. std::vector<int32_t> blockIds; blockIds.push_back(-1); for (int32_t j = 0; j < numRows; j++) { if (i == j) { blockIds[0] = description->getBlockId(i, j); } else { blockIds.push_back(description->getBlockId(i, j)); } } // Do a binary tree scatter. At each step the primary buffer of the sender is // copied into the primary buffer of the receiver. int32_t max2pow = 2; while (max2pow < numRows) { max2pow *= 2; } for (int32_t j = max2pow; j >= 2; j /= 2) { for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id] is the sender int32_t senderId = blockIds[id]; // blockIds[id + j/2] is the receiver int32_t receiverId = blockIds[id + j / 2]; // Get the stream associated with the receiver's block id cudaStream_t stream = description->getBlockStreams()[receiverId]; // Copy from the sender to the receiver (use stream associated with receiver) cudaMemcpyAsync(values[receiverId].Current(), values[senderId].Current(), sizeof(ValueType) * n, cudaMemcpyDefault, stream); } } // Sync all the active streams before next step for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id + j/2] is the receiver int32_t receiverId = blockIds[id + j / 2]; // Set device and sync receiver's stream cudaSetDevice(description->getDeviceAssignments()[receiverId]); cudaStreamSynchronize(description->getBlockStreams()[receiverId]); } } } } cudaSetDevice(current_device); } /** * Outputs a human readable string representation of this Vertex2d object. This is only * intended to be used for de-bugging. * @return Human readable string representation */ std::string toString() { std::stringstream ss; ValueType* c = (ValueType*) malloc(sizeof(ValueType) * n); ValueType* a = (ValueType*) malloc(sizeof(ValueType) * n); int32_t numBlocks = description->getNumBlocks(); ss << "Vertex2d:\n"; for (int32_t i = 0; i < numBlocks; i++) { ss << "Block " << i << ":\n"; ss << "Idx\tCur\tAlt\n"; cudaMemcpy(c, values[i].Current(), sizeof(ValueType) * n, cudaMemcpyDefault); cudaMemcpy(a, values[i].Alternate(), sizeof(ValueType) * n, cudaMemcpyDefault); for (int32_t j = 0; j < n; j++) { ss << j << ":\t" << c[j] << "\t" << a[j] << "\n"; } } free(c); free(a); return ss.str(); } }; template<typename GlobalType, typename LocalType, typename ValueType> class VertexData2D_Unbuffered { const MatrixDecompositionDescription<GlobalType, LocalType>* description; int32_t n; std::vector<ValueType*> values; public: /** * Sets up a VertexData2D_Unbuffered object with an element allocated for each vertex * in each block. * @param descr Pointer to a MatrixDecompositionDescription object describing the layout * of the 2D blocks. 
*/ VertexData2D_Unbuffered(const MatrixDecompositionDescription<GlobalType, LocalType>* descr) : description(descr) { // Resize the values array to be the same size as number of blocks values.resize(descr->getNumBlocks()); // Grab the current device id to switch back after allocations are done int current_device; cudaGetDevice(&current_device); LocalType allocSize = descr->getOffset(); n = allocSize; // Allocate the data for each block for (size_t i = 0; i < descr->getDeviceAssignments().size(); i++) { int device = descr->getDeviceAssignments()[i]; cudaSetDevice(device); cudaMalloc(&(values[i]), sizeof(ValueType) * n); } // Set the device back to what it was initially cudaSetDevice(current_device); } /** * Sets up a VertexData2D_Unbuffered object with _n elements allocated per block. * @param descr Pointer to a MatrixDecompositionDescription object describing the layout * of the 2D blocks. * @param _n The number of elements to allocate per block. */ VertexData2D_Unbuffered(const MatrixDecompositionDescription<GlobalType, LocalType>* descr, size_t _n) : description(descr), n(_n) { // Resize the values array to be the same size as number of blocks values.resize(descr->getNumBlocks()); // Grab the current device id to switch back after allocations are done int current_device; cudaGetDevice(&current_device); // Allocate the data for each block for (size_t i = 0; i < descr->getDeviceAssignments().size(); i++) { int device = descr->getDeviceAssignments()[i]; cudaSetDevice(device); cudaMalloc(&(values[i]), sizeof(ValueType) * n); } // Set the device back to what it was initially cudaSetDevice(current_device); } /** * Destructor. Frees all allocated memory. */ ~VertexData2D_Unbuffered() { for (size_t i = 0; i < values.size(); i++) { if (values[i]) { cudaFree(values[i]); } } } /** * Fills the elements of the data array with the given value. * The elements on the diagonal are filled with the given value. After filling, * either rowScatter or columnScatter will copy the values across the blocks in * either the rows or columns depending on the use. * @param val The value to fill the array with */ void fillElements(ValueType val) { int current_device; cudaGetDevice(&current_device); int32_t numRows = description->getBlockRows(); for (int32_t i = 0; i < numRows; i++) { int32_t blockId = description->getBlockId(i, i); ValueType* vals = get(blockId); int deviceId = description->getDeviceAssignments()[blockId]; cudaStream_t stream = description->getBlockStreams()[blockId]; cudaSetDevice(deviceId); thrust::fill(thrust::cuda::par.on(stream), vals, vals + n, val); } description->syncAllStreams(); cudaSetDevice(current_device); } /** * This implements a column-wise scatter of the global data from the corresponding * row. i.e. The data reduced from row 1 is broadcast to all blocks in * column 1. It is assumed that the data to broadcast is located in the block on * the diagonal. */ void columnScatter() { int current_device; cudaGetDevice(&current_device); // For each column in the decomposition: int32_t numRows = description->getBlockRows(); std::vector<int32_t> blockIds; for (int32_t i = 0; i < numRows; i++) { // Put all the block ids for the column into a vector, with the ID of the diagonal block // at index 0. std::vector<int32_t> blockIds; blockIds.push_back(-1); for (int32_t j = 0; j < numRows; j++) { if (i == j) { blockIds[0] = description->getBlockId(j, i); } else { blockIds.push_back(description->getBlockId(j, i)); } } // Do a binary tree scatter. 
At each step the primary buffer of the sender is // copied into the primary buffer of the receiver. int32_t max2pow = 2; while (max2pow < numRows) { max2pow *= 2; } for (int32_t j = max2pow; j >= 2; j /= 2) { for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id] is the sender int32_t senderId = blockIds[id]; // blockIds[id + j/2] is the sender int32_t receiverId = blockIds[id + j / 2]; // Get the stream associated with the receiver's block id cudaStream_t stream = description->getBlockStreams()[receiverId]; // Copy from the sender to the receiver (use stream associated with receiver) cudaMemcpyAsync(values[receiverId], values[senderId], sizeof(ValueType) * n, cudaMemcpyDefault, stream); } } // Synchronize all the active streams before next step. for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id + j/2] is the sender int32_t receiverId = blockIds[id + j / 2]; // Set device and sync receiver's stream cudaSetDevice(description->getDeviceAssignments()[receiverId]); cudaStreamSynchronize(description->getBlockStreams()[receiverId]); } } } } cudaSetDevice(current_device); } /** * This implements a row-wise scatter of the global data from the corresponding * column. i.e. The data reduced from column 1 is broadcast to all blocks in * row 1. It is assumed that the data to broadcast is located in the block on * the diagonal. */ void rowScatter() { int current_device; cudaGetDevice(&current_device); // For each row in the decomposition: int32_t numRows = description->getBlockRows(); std::vector<int32_t> blockIds; for (int32_t i = 0; i < numRows; i++) { // Put all the block ids for the column into a vector, with the ID of the diagonal block // at index 0. std::vector<int32_t> blockIds; blockIds.push_back(-1); for (int32_t j = 0; j < numRows; j++) { if (i == j) { blockIds[0] = description->getBlockId(i, j); } else { blockIds.push_back(description->getBlockId(i, j)); } } // Do a binary tree scatter. At each step the primary buffer of the sender is // copied into the primary buffer of the receiver. int32_t max2pow = 2; while (max2pow < numRows) { max2pow *= 2; } for (int32_t j = max2pow; j >= 2; j /= 2) { for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id] is the sender int32_t senderId = blockIds[id]; // blockIds[id + j/2] is the receiver int32_t receiverId = blockIds[id + j / 2]; // Get the stream associated with the receiver's block id cudaStream_t stream = description->getBlockStreams()[receiverId]; // Copy from the sender to the receiver (use stream associated with receiver) cudaMemcpyAsync(values[receiverId], values[senderId], sizeof(ValueType) * n, cudaMemcpyDefault, stream); } } // Sync all the active streams before next step for (int32_t id = 0; id < numRows; id++) { if (id % j == 0 && id + j / 2 < numRows) { // blockIds[id + j/2] is the receiver int32_t receiverId = blockIds[id + j / 2]; // Set device and sync receiver's stream cudaSetDevice(description->getDeviceAssignments()[receiverId]); cudaStreamSynchronize(description->getBlockStreams()[receiverId]); } } } } cudaSetDevice(current_device); } /** * Getter for n * @return The value of n */ int32_t getN() { return n; } /** * Gets the pointer to the allocated memory for a specified block. * @param bId The block id to get the memory for. * @return A pointer to the allocated memory for the given block. 
*/ ValueType* get(int32_t bId) { return values[bId]; } }; /** * This method takes in COO format matrix data and a MatrixDecompositionDescription and * returns a Matrix2d object containing the given data. */ template<typename GlobalType, typename LocalType, typename ValueType> Matrix2d<GlobalType, LocalType, ValueType> COOto2d(MatrixDecompositionDescription<GlobalType, LocalType> descr, GlobalType* rowIds, GlobalType* colIds, ValueType* values) { // Grab the current device id to switch back after allocations are done int current_device; cudaGetDevice(&current_device); int32_t blockCount = descr.getNumBlocks(); // Allocate array of size global nnz to hold the block labels int32_t* blockLabels = (int32_t*) malloc(descr.getNnz() * sizeof(int32_t)); // Allocate array to contain row counts for each block and initialize to zero // Allocate array to contain position offsets for writing each blocks data LocalType* blockCounts = (LocalType*) malloc(blockCount * sizeof(LocalType)); LocalType* blockPos = (LocalType*) malloc(blockCount * sizeof(LocalType)); for (int i = 0; i < blockCount; i++) { blockCounts[i] = 0; blockPos[i] = 0; } // For each edge mark in the array the id of the block to which it will belong int32_t blockId; LocalType localRow; LocalType localCol; for (int i = 0; i < descr.getNnz(); i++) { descr.convertGlobaltoLocalRow(rowIds[i], colIds[i], localRow, localCol, blockId); blockLabels[i] = blockId; blockCounts[blockId]++; } // Allocate arrays for putting each blocks data into LocalType** blockRowIds = (LocalType**) malloc(blockCount * sizeof(LocalType*)); LocalType** blockColIds = (LocalType**) malloc(blockCount * sizeof(LocalType*)); ValueType** blockValues = NULL; if (values) blockValues = (ValueType**) malloc(blockCount * sizeof(ValueType*)); for (int i = 0; i < blockCount; i++) { blockRowIds[i] = (LocalType*) malloc(blockCounts[i] * sizeof(LocalType)); blockColIds[i] = (LocalType*) malloc(blockCounts[i] * sizeof(LocalType)); if (values) blockValues[i] = (ValueType*) malloc(blockCounts[i] * sizeof(ValueType)); } // Convert each blocks global rows to local ids and copy into block arrays for (int i = 0; i < descr.getNnz(); i++) { descr.convertGlobaltoLocalRow(rowIds[i], colIds[i], localRow, localCol, blockId); blockRowIds[blockId][blockPos[blockId]] = localRow; blockColIds[blockId][blockPos[blockId]] = localCol; if (values) blockValues[blockId][blockPos[blockId]] = values[i]; blockPos[blockId]++; } // Allocate the result blocks vector std::vector<MultiValuedCsrGraph<LocalType, ValueType>*> blockVector(blockCount); // Convert each blocks COO rows into CSR and create it's graph object. for (int i = 0; i < blockCount; i++) { // Set the device as indicated so the data ends up on the right GPU cudaSetDevice(descr.getDeviceAssignments()[i]); cudaStream_t stream = descr.getBlockStreams()[i]; if (blockCounts[i] > 0) { CSR_Result_Weighted<LocalType, ValueType> result; ConvertCOOtoCSR_weighted(blockRowIds[i], blockColIds[i], values ? 
blockValues[i] : NULL, (int64_t) blockCounts[i], (descr.getOffset() - 1), result); MultiValuedCsrGraph<LocalType, ValueType>* csrGraph = new MultiValuedCsrGraph<LocalType, ValueType>((size_t) result.size, (size_t) result.nnz, stream); if (values) csrGraph->allocateEdgeData(1, NULL); cudaMemcpy(csrGraph->get_raw_row_offsets(), result.rowOffsets, (result.size + 1) * sizeof(LocalType), cudaMemcpyDefault); cudaMemcpy(csrGraph->get_raw_column_indices(), result.colIndices, result.nnz * sizeof(LocalType), cudaMemcpyDefault); if (values) cudaMemcpy(csrGraph->get_raw_edge_dim(0), result.edgeWeights, result.nnz * sizeof(LocalType), cudaMemcpyDefault); blockVector[i] = csrGraph; result.Destroy(); } else { MultiValuedCsrGraph<LocalType, ValueType>* csrGraph = new MultiValuedCsrGraph<LocalType, ValueType>((size_t) descr.getOffset(), (size_t) 0, stream); cudaMemset( csrGraph->get_raw_row_offsets(), 0, sizeof(LocalType) * (descr.getOffset() + 1)); blockVector[i] = csrGraph; } } // Free temporary memory for (int i = 0; i < blockCount; i++) { free(blockRowIds[i]); free(blockColIds[i]); if (values) free(blockValues[i]); } free(blockRowIds); free(blockColIds); if (values) free(blockValues); cudaSetDevice(current_device); // Put it all together into a Matrix2d object for return return Matrix2d<GlobalType, LocalType, ValueType>(descr, blockVector); } }
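To make the intended call sequence concrete, here is a minimal usage sketch. It is not part of the original sources: it assumes descr has already been constructed for the desired block layout, that the COO arrays are host-resident (COOto2d walks them with plain host loops), and that these classes are referenced through the nvgraph namespace like the other headers.

// Hypothetical usage of COOto2d together with the 2D vertex-data helpers.
void example_build_2d(nvgraph::MatrixDecompositionDescription<int, int>& descr,
                      int* rowIds, int* colIds, float* vals)
{
  // Split the COO edges into per-block CSR graphs on their assigned GPUs.
  nvgraph::Matrix2d<int, int, float> m2d = nvgraph::COOto2d(descr, rowIds, colIds, vals);

  // Per-vertex data matching the decomposition: fill the diagonal blocks,
  // then replicate down each block column before running block-wise kernels.
  nvgraph::VertexData2D_Unbuffered<int, int, float> x(&descr);
  x.fillElements(1.0f);
  x.columnScatter();
  // ... after reducing partial results onto the diagonal, rowScatter() re-broadcasts them.
  (void) m2d;
}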
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/jaccard_gpu.cuh
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Jaccard similarity edge weights
// Author: Alexandre Fender afender@nvidia.com and Maxim Naumov.

#pragma once

namespace nvlouvain {

template <bool weighted, typename T>
int jaccard(int n, int e, int *csrPtr, int *csrInd, T *csrVal,
            T *v, T *work, T gamma, T *weight_i, T *weight_s, T *weight_j);

}
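A call to this entry point might look like the following sketch. It is not part of the original sources: the buffer sizes and the roles of v, work, gamma and the three weight arrays are assumptions read off the parameter names, and error checking is omitted.

// Hypothetical call sketch; all pointers are device memory, n = vertices, e = edges.
#include <cuda_runtime.h>

void example_jaccard(int n, int e, int* d_csrPtr, int* d_csrInd, float* d_csrVal)
{
  float *d_v, *d_work, *d_weight_i, *d_weight_s, *d_weight_j;
  cudaMalloc(&d_v,        n * sizeof(float));   // per-vertex scratch (assumed)
  cudaMalloc(&d_work,     n * sizeof(float));   // per-vertex scratch (assumed)
  cudaMalloc(&d_weight_i, e * sizeof(float));   // per-edge intersection weight (assumed)
  cudaMalloc(&d_weight_s, e * sizeof(float));   // per-edge union weight (assumed)
  cudaMalloc(&d_weight_j, e * sizeof(float));   // per-edge Jaccard weight (assumed)

  // weighted = true treats csrVal as input edge weights; gamma as a scaling factor (assumed).
  nvlouvain::jaccard<true, float>(n, e, d_csrPtr, d_csrInd, d_csrVal,
                                  d_v, d_work, 1.0f,
                                  d_weight_i, d_weight_s, d_weight_j);

  cudaFree(d_v); cudaFree(d_work);
  cudaFree(d_weight_i); cudaFree(d_weight_s); cudaFree(d_weight_j);
}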
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/multi_valued_csr_graph.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "csr_graph.hxx" #include "valued_csr_graph.hxx" #include <vector> namespace nvgraph { template <typename IndexType_, typename ValueType_> class MultiValuedCsrGraph : public nvgraph::CsrGraph<IndexType_> { public: typedef IndexType_ IndexType; typedef ValueType_ ValueType; private: typedef nvgraph::CsrGraph<IndexType> Parent; protected: /*! Storage for the nonzero entries of the multi CSR data structure. */ //std::vector <nvgraph::Vector<ValueType>*> values_dim; //std::vector <nvgraph::Vector<ValueType>*> vertex_dim; std::vector <SHARED_PREFIX::shared_ptr<nvgraph::Vector<ValueType> > > values_dim; std::vector <SHARED_PREFIX::shared_ptr<nvgraph::Vector<ValueType> > > vertex_dim; public: /*! Storage for the nonzero entries of the Multi-CSR data structure.*/ MultiValuedCsrGraph(void) {} ~MultiValuedCsrGraph(void) { //for (int i = 0; i < n_vertex_dim; ++i) // if (vertex_dim[i]) // delete vertex_dim[i]; // for (int i = 0; i < n_edges_dim; ++i) // if (values_dim[i]) // delete values_dim[i]; } /*! Construct a \p MultiValuedCsrGraph with a specific shape and number of nonzero entries. * * \param num_rows Number of rows. * \param num_entries Number of nonzero graph entries. * \param num_dimensions Number of dimensions (ie. number of values arrays). */ MultiValuedCsrGraph(size_t num_rows, size_t num_entries, cudaStream_t stream) : Parent(num_rows, num_entries, stream) { } /*! 
Construct a \p MultiValuedCsrGraph from another graph.*/ MultiValuedCsrGraph(const MultiValuedCsrGraph& gr) : Parent(gr), values_dim(gr.values_dim), vertex_dim(gr.vertex_dim) {} MultiValuedCsrGraph(const Parent& gr) : Parent(gr) {} inline void allocateVertexData(size_t v_dim, cudaStream_t stream) { vertex_dim.resize(v_dim); for (size_t i = 0; i < vertex_dim.size(); ++i) vertex_dim[i] = SHARED_PREFIX::shared_ptr<nvgraph::Vector<ValueType> >(new Vector<ValueType>(this->num_vertices, stream)); } inline void allocateEdgeData(size_t edges_dim, cudaStream_t stream) { values_dim.resize(edges_dim); for (size_t i = 0; i < values_dim.size(); ++i) values_dim[i] = SHARED_PREFIX::shared_ptr<nvgraph::Vector<ValueType> >(new Vector<ValueType>(this->num_edges, stream)); } inline void attachVertexData(size_t i, ValueType* data, cudaStream_t stream) { if (vertex_dim.size() <= i) vertex_dim.resize(i+1); vertex_dim[i] = SHARED_PREFIX::shared_ptr<nvgraph::Vector<ValueType> >(new Vector<ValueType>(this->num_vertices, data, stream)); } inline void attachEdgeData(size_t i, ValueType* data, cudaStream_t stream) { if (values_dim.size() <= i) values_dim.resize(i+1); values_dim[i] = SHARED_PREFIX::shared_ptr<nvgraph::Vector<ValueType> >(new Vector<ValueType>(this->num_edges, data, stream)); } inline size_t getNumValues() { return values_dim.size(); } inline size_t get_num_vertex_dim() const { return vertex_dim.size(); } inline size_t get_num_edge_dim() const { return values_dim.size(); } inline Vector<ValueType>& get_vertex_dim(size_t v_dim) { return *vertex_dim[v_dim]; } inline Vector<ValueType>& get_edge_dim(size_t e_dim) { return *values_dim[e_dim]; } inline ValueType* get_raw_vertex_dim(size_t v_dim) { return vertex_dim[v_dim]->raw(); } inline ValueType* get_raw_edge_dim(size_t e_dim) { return values_dim[e_dim]->raw(); } inline const Vector<ValueType>& get_vertex_dim(size_t v_dim) const { return *vertex_dim[v_dim]; } inline const Vector<ValueType>& get_edge_dim(size_t e_dim) const { return *values_dim[e_dim]; } inline const ValueType* get_raw_vertex_dim(size_t v_dim) const { return vertex_dim[v_dim]->raw(); } inline const ValueType* get_raw_edge_dim(size_t e_dim) const { return values_dim[e_dim]->raw(); } /*! Extract a \p ValuedCsrGraph from a given dimension of the \p MultiValuedCsrGraph * \param dim_index Wanted dimension of the \p MultiValuedCsrGraph */ ValuedCsrGraph<IndexType, ValueType>* get_valued_csr_graph(const size_t dim_index) { //ValuedCsrGraph<IndexType, ValueType> *v = new ValuedCsrGraph<IndexType, ValueType>(static_cast<nvgraph::CsrGraph<IndexType> >(*this), *values_dim[dim_index]); //return *v; //SHARED_PREFIX::shared_ptr<ValuedCsrGraph<IndexType, ValueType> > svcsr = SHARED_PREFIX::shared_ptr<ValuedCsrGraph<IndexType, ValueType> >(new ValuedCsrGraph<IndexType, ValueType>(static_cast<nvgraph::CsrGraph<IndexType> >(*this), *values_dim[dim_index])); //return svcsr; //segfaults ///return ValuedCsrGraph<IndexType, ValueType>(static_cast<nvgraph::CsrGraph<IndexType> >(*this), *values_dim[dim_index]);//segfaults ValuedCsrGraph<IndexType, ValueType>* pvcsr = new ValuedCsrGraph<IndexType, ValueType>(static_cast<nvgraph::CsrGraph<IndexType> >(*this), *values_dim[dim_index]); return pvcsr; } /*! Assignment from another MultiValuedCsrGraph graph. 
* * \param graph Another MultiValuedCsrGraph */ MultiValuedCsrGraph& operator=(const MultiValuedCsrGraph& graph); //RESIZE: We should try not to resize MULTI CSR graphs in general for performance reasons // SET //Set should be done in a safe way in the API // it is possible to use a cudaMemcpy like : cudaMemcpy(G.get_raw_vertex_dim(1), v_h, // (size_t)(n*sizeof(v_h[0])), // cudaMemcpyHostToDevice); //Accept method injection DEFINE_VISITABLE(IndexType_) }; // class MultiValuedCsrGraph }
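The accessors above suggest the following usage pattern; this is a sketch rather than original code, and the dimensions, stream, and host buffers are placeholders.

// Sketch: a CSR topology with one vertex-value array and one edge-value array.
void example_multi_valued(size_t n, size_t nnz, cudaStream_t stream)
{
  nvgraph::MultiValuedCsrGraph<int, float> G(n, nnz, stream);
  G.allocateVertexData(1, stream);   // dimension 0: one value per vertex
  G.allocateEdgeData(1, stream);     // dimension 0: one value per edge

  // Raw device pointers, e.g. for the kind of cudaMemcpy mentioned in the comment above:
  // cudaMemcpy(G.get_raw_edge_dim(0), h_weights, nnz * sizeof(float), cudaMemcpyHostToDevice);

  // Single-dimension view for algorithms that expect a ValuedCsrGraph;
  // the returned object is heap-allocated, so the caller releases it.
  nvgraph::ValuedCsrGraph<int, float>* view = G.get_valued_csr_graph(0);
  delete view;
}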
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/matrix.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda.h> #include <cublas_v2.h> #include <curand.h> #include <cusolverDn.h> #include <cusparse.h> #include "nvgraph_vector.hxx" #include "valued_csr_graph.hxx" namespace nvgraph { /// Abstract matrix class /** Derived classes must implement matrix-vector products. */ template <typename IndexType_, typename ValueType_> class Matrix { public: /// Number of rows const IndexType_ m; /// Number of columns const IndexType_ n; /// CUDA stream cudaStream_t s; /// Constructor /** @param _m Number of rows. * @param _n Number of columns. */ Matrix(IndexType_ _m, IndexType_ _n) : m(_m), n(_n), s(0){} /// Destructor virtual ~Matrix() {} /// Get and Set CUDA stream virtual void setCUDAStream(cudaStream_t _s) = 0; virtual void getCUDAStream(cudaStream_t *_s) = 0; /// Matrix-vector product /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output * vector. */ virtual void mv(ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const = 0; virtual void mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const = 0; /// Color and Reorder virtual void color(IndexType_ *c, IndexType_ *p) const = 0; virtual void reorder(IndexType_ *p) const = 0; /// Incomplete Cholesky (setup, factor and solve) virtual void prec_setup(Matrix<IndexType_,ValueType_> * _M) = 0; virtual void prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const = 0; //Get the sum of all edges virtual ValueType_ getEdgeSum() const = 0; }; /// Dense matrix class template <typename IndexType_, typename ValueType_> class DenseMatrix : public Matrix<IndexType_, ValueType_> { private: /// Whether to transpose matrix const bool trans; /// Matrix entries, stored column-major in device memory const ValueType_ * A; /// Leading dimension of matrix entry array const IndexType_ lda; public: /// Constructor DenseMatrix(bool _trans, IndexType_ _m, IndexType_ _n, const ValueType_ * _A, IndexType_ _lda); /// Destructor virtual ~DenseMatrix(); /// Get and Set CUDA stream virtual void setCUDAStream(cudaStream_t _s); virtual void getCUDAStream(cudaStream_t *_s); /// Matrix-vector product virtual void mv(ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Matrix-set of k vectors product virtual void mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Color and Reorder virtual void color(IndexType_ *c, IndexType_ *p) const; virtual void reorder(IndexType_ *p) const; /// Incomplete Cholesky (setup, factor and solve) virtual void prec_setup(Matrix<IndexType_,ValueType_> * _M); virtual void prec_solve(IndexType_ k, ValueType_ alpha, 
ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const; //Get the sum of all edges virtual ValueType_ getEdgeSum() const; }; /// Sparse matrix class in CSR format template <typename IndexType_, typename ValueType_> class CsrMatrix : public Matrix<IndexType_, ValueType_> { private: /// Whether to transpose matrix const bool trans; /// Whether matrix is stored in symmetric format const bool sym; /// Number of non-zero entries const IndexType_ nnz; /// Matrix properties const cusparseMatDescr_t descrA; /// Matrix entry values (device memory) /*const*/ ValueType_ * csrValA; /// Pointer to first entry in each row (device memory) const IndexType_ * csrRowPtrA; /// Column index of each matrix entry (device memory) const IndexType_ * csrColIndA; /// Analysis info (pointer to opaque CUSPARSE struct) cusparseSolveAnalysisInfo_t info_l; cusparseSolveAnalysisInfo_t info_u; /// factored flag (originally set to false, then reset to true after factorization), /// notice we only want to factor once bool factored; public: /// Constructor CsrMatrix(bool _trans, bool _sym, IndexType_ _m, IndexType_ _n, IndexType_ _nnz, const cusparseMatDescr_t _descrA, /*const*/ ValueType_ * _csrValA, const IndexType_ * _csrRowPtrA, const IndexType_ * _csrColIndA); /// Constructor CsrMatrix( ValuedCsrGraph<IndexType_,ValueType_> & G, const cusparseMatDescr_t _descrA =0); /// Destructor virtual ~CsrMatrix(); /// Get and Set CUDA stream virtual void setCUDAStream(cudaStream_t _s); virtual void getCUDAStream(cudaStream_t *_s); /// Matrix-vector product virtual void mv(ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Matrix-set of k vectors product virtual void mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Color and Reorder virtual void color(IndexType_ *c, IndexType_ *p) const; virtual void reorder(IndexType_ *p) const; /// Incomplete Cholesky (setup, factor and solve) virtual void prec_setup(Matrix<IndexType_,ValueType_> * _M); virtual void prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const; //Get the sum of all edges virtual ValueType_ getEdgeSum() const; }; /// Graph Laplacian matrix template <typename IndexType_, typename ValueType_> class LaplacianMatrix : public Matrix<IndexType_, ValueType_> { private: /// Adjacency matrix /*const*/ Matrix<IndexType_, ValueType_> * A; /// Degree of each vertex Vector<ValueType_> D; /// Preconditioning matrix Matrix<IndexType_, ValueType_> * M; public: /// Constructor LaplacianMatrix(/*const*/ Matrix<IndexType_,ValueType_> & _A); /// Destructor virtual ~LaplacianMatrix(); /// Get and Set CUDA stream virtual void setCUDAStream(cudaStream_t _s); virtual void getCUDAStream(cudaStream_t *_s); /// Matrix-vector product virtual void mv(ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Matrix-set of k vectors product virtual void mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Scale a set of k vectors by a diagonal virtual void dm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Color and Reorder virtual void color(IndexType_ *c, IndexType_ *p) const; virtual void reorder(IndexType_ *p) const; /// Solve preconditioned system M x = f for a set of k vectors virtual void 
prec_setup(Matrix<IndexType_,ValueType_> * _M); virtual void prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const; //Get the sum of all edges virtual ValueType_ getEdgeSum() const; }; /// Modularity matrix template <typename IndexType_, typename ValueType_> class ModularityMatrix : public Matrix<IndexType_, ValueType_> { private: /// Adjacency matrix /*const*/ Matrix<IndexType_, ValueType_> * A; /// Degree of each vertex Vector<ValueType_> D; IndexType_ nnz; ValueType_ edge_sum; /// Preconditioning matrix Matrix<IndexType_, ValueType_> * M; public: /// Constructor ModularityMatrix(/*const*/ Matrix<IndexType_,ValueType_> & _A, IndexType_ _nnz); /// Destructor virtual ~ModularityMatrix(); /// Get and Set CUDA stream virtual void setCUDAStream(cudaStream_t _s); virtual void getCUDAStream(cudaStream_t *_s); /// Matrix-vector product virtual void mv(ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Matrix-set of k vectors product virtual void mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Scale a set of k vectors by a diagonal virtual void dm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const; /// Color and Reorder virtual void color(IndexType_ *c, IndexType_ *p) const; virtual void reorder(IndexType_ *p) const; /// Solve preconditioned system M x = f for a set of k vectors virtual void prec_setup(Matrix<IndexType_,ValueType_> * _M); virtual void prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const; //Get the sum of all edges virtual ValueType_ getEdgeSum() const; }; // cublasIxamax inline cublasStatus_t cublasIxamax(cublasHandle_t handle, int n, const float *x, int incx, int *result) { return cublasIsamax(handle, n, x, incx, result); } inline cublasStatus_t cublasIxamax(cublasHandle_t handle, int n, const double *x, int incx, int *result) { return cublasIdamax(handle, n, x, incx, result); } // cublasIxamin inline cublasStatus_t cublasIxamin(cublasHandle_t handle, int n, const float *x, int incx, int *result) { return cublasIsamin(handle, n, x, incx, result); } inline cublasStatus_t cublasIxamin(cublasHandle_t handle, int n, const double *x, int incx, int *result) { return cublasIdamin(handle, n, x, incx, result); } // cublasXasum inline cublasStatus_t cublasXasum(cublasHandle_t handle, int n, const float *x, int incx, float *result) { return cublasSasum(handle, n, x, incx, result); } inline cublasStatus_t cublasXasum(cublasHandle_t handle, int n, const double *x, int incx, double *result) { return cublasDasum(handle, n, x, incx, result); } // cublasXaxpy inline cublasStatus_t cublasXaxpy(cublasHandle_t handle, int n, const float * alpha, const float * x, int incx, float * y, int incy) { return cublasSaxpy(handle, n, alpha, x, incx, y, incy); } inline cublasStatus_t cublasXaxpy(cublasHandle_t handle, int n, const double *alpha, const double *x, int incx, double *y, int incy) { return cublasDaxpy(handle, n, alpha, x, incx, y, incy); } // cublasXcopy inline cublasStatus_t cublasXcopy(cublasHandle_t handle, int n, const float *x, int incx, float *y, int incy) { return cublasScopy(handle, n, x, incx, y, incy); } inline cublasStatus_t cublasXcopy(cublasHandle_t handle, int n, const double *x, int incx, double *y, int incy) { return cublasDcopy(handle, n, x, incx, y, incy); } // 
cublasXdot inline cublasStatus_t cublasXdot(cublasHandle_t handle, int n, const float *x, int incx, const float *y, int incy, float *result) { return cublasSdot(handle, n, x, incx, y, incy, result); } inline cublasStatus_t cublasXdot(cublasHandle_t handle, int n, const double *x, int incx, const double *y, int incy, double *result) { return cublasDdot(handle, n, x, incx, y, incy, result); } // cublasXnrm2 inline cublasStatus_t cublasXnrm2(cublasHandle_t handle, int n, const float *x, int incx, float *result) { return cublasSnrm2(handle, n, x, incx, result); } inline cublasStatus_t cublasXnrm2(cublasHandle_t handle, int n, const double *x, int incx, double *result) { return cublasDnrm2(handle, n, x, incx, result); } // cublasXscal inline cublasStatus_t cublasXscal(cublasHandle_t handle, int n, const float *alpha, float *x, int incx) { return cublasSscal(handle, n, alpha, x, incx); } inline cublasStatus_t cublasXscal(cublasHandle_t handle, int n, const double *alpha, double *x, int incx) { return cublasDscal(handle, n, alpha, x, incx); } // cublasXgemv inline cublasStatus_t cublasXgemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const float *beta, float *y, int incy) { return cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } inline cublasStatus_t cublasXgemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const double *alpha, const double *A, int lda, const double *x, int incx, const double *beta, double *y, int incy) { return cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } // cublasXger inline cublasStatus_t cublasXger(cublasHandle_t handle, int m, int n, const float *alpha, const float *x, int incx, const float *y, int incy, float *A, int lda) { return cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda); } inline cublasStatus_t cublasXger(cublasHandle_t handle, int m, int n, const double *alpha, const double *x, int incx, const double *y, int incy, double *A, int lda) { return cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda); } // cublasXgemm inline cublasStatus_t cublasXgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc) { return cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } inline cublasStatus_t cublasXgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc) { return cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } // cublasXgeam inline cublasStatus_t cublasXgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const float *alpha, const float *A, int lda, const float *beta, const float *B, int ldb, float *C, int ldc) { return cublasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } inline cublasStatus_t cublasXgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const double *alpha, const double *A, int lda, const double *beta, const double *B, int ldb, double *C, int ldc) { return cublasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } // cublasXtrsm inline cublasStatus_t cublasXtrsm(cublasHandle_t handle, 
cublasSideMode_t side, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int m, int n, const float *alpha, const float *A, int lda, float *B, int ldb) { return cublasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb); } inline cublasStatus_t cublasXtrsm(cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int m, int n, const double *alpha, const double *A, int lda, double *B, int ldb) { return cublasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb); } // curandGeneratorNormalX inline curandStatus_t curandGenerateNormalX(curandGenerator_t generator, float * outputPtr, size_t n, float mean, float stddev) { return curandGenerateNormal(generator, outputPtr, n, mean, stddev); } inline curandStatus_t curandGenerateNormalX(curandGenerator_t generator, double * outputPtr, size_t n, double mean, double stddev) { return curandGenerateNormalDouble(generator, outputPtr, n, mean, stddev); } // cusolverXpotrf_bufferSize inline cusolverStatus_t cusolverXpotrf_bufferSize(cusolverDnHandle_t handle, int n, float *A, int lda, int *Lwork){ return cusolverDnSpotrf_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,A,lda,Lwork); } inline cusolverStatus_t cusolverXpotrf_bufferSize(cusolverDnHandle_t handle, int n, double *A, int lda, int *Lwork){ return cusolverDnDpotrf_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,A,lda,Lwork); } // cusolverXpotrf inline cusolverStatus_t cusolverXpotrf(cusolverDnHandle_t handle, int n, float *A, int lda, float *Workspace, int Lwork, int *devInfo){ return cusolverDnSpotrf(handle,CUBLAS_FILL_MODE_LOWER,n,A,lda,Workspace,Lwork,devInfo); } inline cusolverStatus_t cusolverXpotrf(cusolverDnHandle_t handle, int n, double *A, int lda, double *Workspace, int Lwork, int *devInfo){ return cusolverDnDpotrf(handle,CUBLAS_FILL_MODE_LOWER,n,A,lda,Workspace,Lwork,devInfo); } // cusolverXgesvd_bufferSize inline cusolverStatus_t cusolverXgesvd_bufferSize(cusolverDnHandle_t handle, int m, int n, float *A, int lda, float *U, int ldu, float *VT, int ldvt, int *Lwork){ //ideally //char jobu = 'O'; //char jobvt= 'N'; //only supported //char jobu = 'A'; //char jobvt= 'A'; return cusolverDnSgesvd_bufferSize(handle,m,n,Lwork); } inline cusolverStatus_t cusolverXgesvd_bufferSize(cusolverDnHandle_t handle, int m, int n, double *A, int lda, double *U, int ldu, double *VT, int ldvt, int *Lwork){ //ideally //char jobu = 'O'; //char jobvt= 'N'; //only supported //char jobu = 'A'; //char jobvt= 'A'; return cusolverDnDgesvd_bufferSize(handle,m,n,Lwork); } // cusolverXgesvd inline cusolverStatus_t cusolverXgesvd(cusolverDnHandle_t handle, int m, int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt, float *Work, int Lwork, float *rwork, int *devInfo){ //ideally //char jobu = 'O'; //char jobvt= 'N'; //only supported char jobu = 'A'; char jobvt= 'A'; return cusolverDnSgesvd(handle,jobu,jobvt,m,n,A,lda,S,U,ldu,VT,ldvt,Work,Lwork,rwork,devInfo); } inline cusolverStatus_t cusolverXgesvd(cusolverDnHandle_t handle, int m, int n, double *A, int lda, double *S, double *U, int ldu, double *VT, int ldvt, double *Work, int Lwork, double *rwork, int *devInfo){ //ideally //char jobu = 'O'; //char jobvt= 'N'; //only supported char jobu = 'A'; char jobvt= 'A'; return cusolverDnDgesvd(handle,jobu,jobvt,m,n,A,lda,S,U,ldu,VT,ldvt,Work,Lwork,rwork,devInfo); } // cusolverXgesvd_cond inline cusolverStatus_t cusolverXgesvd_cond(cusolverDnHandle_t handle, int m, int n, float *A, int lda, float *S, float *U, 
int ldu, float *VT, int ldvt, float *Work, int Lwork, float *rwork, int *devInfo){ //ideally //char jobu = 'N'; //char jobvt= 'N'; //only supported char jobu = 'A'; char jobvt= 'A'; return cusolverDnSgesvd(handle,jobu,jobvt,m,n,A,lda,S,U,ldu,VT,ldvt,Work,Lwork,rwork,devInfo); } inline cusolverStatus_t cusolverXgesvd_cond(cusolverDnHandle_t handle, int m, int n, double *A, int lda, double *S, double *U, int ldu, double *VT, int ldvt, double *Work, int Lwork, double *rwork, int *devInfo){ //ideally //char jobu = 'N'; //char jobvt= 'N'; //only supported char jobu = 'A'; char jobvt= 'A'; return cusolverDnDgesvd(handle,jobu,jobvt,m,n,A,lda,S,U,ldu,VT,ldvt,Work,Lwork,rwork,devInfo); } // cusparseXcsrmv inline cusparseStatus_t cusparseXcsrmv(cusparseHandle_t handle, cusparseOperation_t transA, int m, int n, int nnz, const float * alpha, const cusparseMatDescr_t descrA, const float * csrValA, const int * csrRowPtrA, const int * csrColIndA, const float * x, const float * beta, float *y) { return cusparseScsrmv_mp(handle, transA, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y); } inline cusparseStatus_t cusparseXcsrmv(cusparseHandle_t handle, cusparseOperation_t transA, int m, int n, int nnz, const double * alpha, const cusparseMatDescr_t descrA, const double * csrValA, const int * csrRowPtrA, const int * csrColIndA, const double * x, const double * beta, double *y) { return cusparseDcsrmv_mp(handle, transA, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y); } // cusparseXcsrmm inline cusparseStatus_t cusparseXcsrmm(cusparseHandle_t handle, cusparseOperation_t transA, int m, int n, int k, int nnz, const float *alpha, const cusparseMatDescr_t descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *B, int ldb, const float *beta, float *C, int ldc) { return cusparseScsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } inline cusparseStatus_t cusparseXcsrmm(cusparseHandle_t handle, cusparseOperation_t transA, int m, int n, int k, int nnz, const double *alpha, const cusparseMatDescr_t descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *B, int ldb, const double *beta, double *C, int ldc) { return cusparseDcsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } // cusparseXcsrgeam inline cusparseStatus_t cusparseXcsrgeam(cusparseHandle_t handle, int m, int n, const float *alpha, const cusparseMatDescr_t descrA, int nnzA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *beta, const cusparseMatDescr_t descrB, int nnzB, const float *csrValB, const int *csrRowPtrB, const int *csrColIndB, const cusparseMatDescr_t descrC, float *csrValC, int *csrRowPtrC, int *csrColIndC) { return cusparseScsrgeam(handle,m,n, alpha,descrA,nnzA,csrValA,csrRowPtrA,csrColIndA, beta,descrB,nnzB,csrValB,csrRowPtrB,csrColIndB, descrC,csrValC,csrRowPtrC,csrColIndC); } inline cusparseStatus_t cusparseXcsrgeam(cusparseHandle_t handle, int m, int n, const double *alpha, const cusparseMatDescr_t descrA, int nnzA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *beta, const cusparseMatDescr_t descrB, int nnzB, const double *csrValB, const int *csrRowPtrB, const int *csrColIndB, const cusparseMatDescr_t descrC, double *csrValC, int *csrRowPtrC, int *csrColIndC) { return cusparseDcsrgeam(handle,m,n, alpha,descrA,nnzA,csrValA,csrRowPtrA,csrColIndA, 
beta,descrB,nnzB,csrValB,csrRowPtrB,csrColIndB, descrC,csrValC,csrRowPtrC,csrColIndC); } //ILU0, incomplete-LU with 0 threshhold (CUSPARSE) inline cusparseStatus_t cusparseXcsrilu0(cusparseHandle_t handle, cusparseOperation_t trans, int m, const cusparseMatDescr_t descrA, float *csrValM, const int *csrRowPtrA, const int *csrColIndA, cusparseSolveAnalysisInfo_t info){ return cusparseScsrilu0(handle,trans,m,descrA,csrValM,csrRowPtrA,csrColIndA,info); } inline cusparseStatus_t cusparseXcsrilu0(cusparseHandle_t handle, cusparseOperation_t trans, int m, const cusparseMatDescr_t descrA, double *csrValM, const int *csrRowPtrA, const int *csrColIndA, cusparseSolveAnalysisInfo_t info){ return cusparseDcsrilu0(handle,trans,m,descrA,csrValM,csrRowPtrA,csrColIndA,info); } //IC0, incomplete-Cholesky with 0 threshhold (CUSPARSE) inline cusparseStatus_t cusparseXcsric0(cusparseHandle_t handle, cusparseOperation_t trans, int m, const cusparseMatDescr_t descrA, float *csrValM, const int *csrRowPtrA, const int *csrColIndA, cusparseSolveAnalysisInfo_t info){ return cusparseScsric0(handle,trans,m,descrA,csrValM,csrRowPtrA,csrColIndA,info); } inline cusparseStatus_t cusparseXcsric0(cusparseHandle_t handle, cusparseOperation_t trans, int m, const cusparseMatDescr_t descrA, double *csrValM, const int *csrRowPtrA, const int *csrColIndA, cusparseSolveAnalysisInfo_t info){ return cusparseDcsric0(handle,trans,m,descrA,csrValM,csrRowPtrA,csrColIndA,info); } //sparse triangular solve (CUSPARSE) //analysis phase inline cusparseStatus_t cusparseXcsrsm_analysis (cusparseHandle_t handle, cusparseOperation_t transa, int m, int nnz, const cusparseMatDescr_t descra, const float *a, const int *ia, const int *ja, cusparseSolveAnalysisInfo_t info){ return cusparseScsrsm_analysis(handle,transa,m,nnz,descra,a,ia,ja,info); } inline cusparseStatus_t cusparseXcsrsm_analysis (cusparseHandle_t handle, cusparseOperation_t transa, int m, int nnz, const cusparseMatDescr_t descra, const double *a, const int *ia, const int *ja, cusparseSolveAnalysisInfo_t info){ return cusparseDcsrsm_analysis(handle,transa,m,nnz,descra,a,ia,ja,info); } //solve phase inline cusparseStatus_t cusparseXcsrsm_solve (cusparseHandle_t handle, cusparseOperation_t transa, int m, int k, float alpha, const cusparseMatDescr_t descra, const float *a, const int *ia, const int *ja, cusparseSolveAnalysisInfo_t info, const float *x, int ldx, float *y, int ldy){ return cusparseScsrsm_solve(handle,transa,m,k,&alpha,descra,a,ia,ja,info,x,ldx,y,ldy); } inline cusparseStatus_t cusparseXcsrsm_solve (cusparseHandle_t handle, cusparseOperation_t transa, int m, int k, double alpha, const cusparseMatDescr_t descra, const double *a, const int *ia, const int *ja, cusparseSolveAnalysisInfo_t info, const double *x, int ldx, double *y, int ldy){ return cusparseDcsrsm_solve(handle,transa,m,k,&alpha,descra,a,ia,ja,info,x,ldx,y,ldy); } inline cusparseStatus_t cusparseXcsrcolor(cusparseHandle_t handle, int m, int nnz, const cusparseMatDescr_t descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *fractionToColor, int *ncolors, int *coloring, int *reordering,cusparseColorInfo_t info) { return cusparseScsrcolor(handle,m,nnz,descrA,csrValA,csrRowPtrA,csrColIndA,fractionToColor,ncolors,coloring,reordering,info); } inline cusparseStatus_t cusparseXcsrcolor(cusparseHandle_t handle, int m, int nnz, const cusparseMatDescr_t descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *fractionToColor, int *ncolors, int *coloring, int 
*reordering,cusparseColorInfo_t info) { return cusparseDcsrcolor(handle,m,nnz,descrA,csrValA,csrRowPtrA,csrColIndA,fractionToColor,ncolors,coloring,reordering,info); } }
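These cublasX*/cusparseX*/cusolverX* overloads exist so the solver templates above can be written once and instantiated for either float or double; a small sketch of that dispatch (illustrative only, status checking omitted):

// The same call site resolves to cublasSnrm2 for float and cublasDnrm2 for double.
template <typename ValueType_>
ValueType_ example_norm(cublasHandle_t handle, int n, const ValueType_* d_x)
{
  ValueType_ result;
  nvgraph::cublasXnrm2(handle, n, d_x, 1, &result);
  return result;
}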
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/graph_visitors.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef GRAPH_VISITORS_HXX
#define GRAPH_VISITORS_HXX

namespace nvgraph
{
  // PROBLEM: using the Visitor design pattern over a
  //          hierarchy of visitees that depend on a
  //          different number of template arguments.
  //
  // SOLUTION: use the Acyclic Visitor
  //           (A. Alexandrescu, "Modern C++ Design", Section 10.4),
  //           where *concrete* visitors must be parameterized by all
  //           the possible template args of the visited classes (visitees).
  //
  struct VisitorBase
  {
    virtual ~VisitorBase(void) {}
  };

  template<typename T>
  struct Visitor
  {
    virtual void Visit(T&) = 0;
    virtual ~Visitor() {}
  };
} // end namespace

#endif
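A concrete visitor then derives from VisitorBase plus one Visitor<T> per visitee type it handles; the visitee side injects an Accept method through the DEFINE_VISITABLE macro used in the graph headers, whose definition is not shown here, so the dispatch described below is an assumption in the spirit of Alexandrescu's acyclic visitor. A minimal sketch:

#include <cstdio>

// Illustrative concrete visitor (not part of the original sources).
struct RowOffsetsPrinter : nvgraph::VisitorBase,
                           nvgraph::Visitor<nvgraph::CsrGraph<int> >
{
  void Visit(nvgraph::CsrGraph<int>& g)
  {
    std::printf("row_offsets at %p\n", (void*) g.get_raw_row_offsets());
  }
};

// The injected Accept(VisitorBase& vb) is then expected to attempt
// dynamic_cast<Visitor<CsrGraph<int> >*>(&vb) and, on success, call Visit(*this).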
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/valued_csr_graph.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "csr_graph.hxx"
#include "nvgraph_vector.hxx"

namespace nvgraph
{

/*! A ValuedCsrGraph is a graph stored in a CSR data structure.
    It represents a weighted graph and has storage for row_offsets, column_indices and values */
template <typename IndexType_, typename ValueType_>
class ValuedCsrGraph : public nvgraph::CsrGraph<IndexType_>
{
public:
    typedef IndexType_ IndexType;
    typedef ValueType_ ValueType;

private:
    typedef nvgraph::CsrGraph<IndexType> Parent;

protected:
    /*! Storage for the nonzero entries of the CSR data structure. */
    SHARED_PREFIX::shared_ptr<ValueType> values;

public:
    /*! Construct an empty \p ValuedCsrGraph. */
    ValuedCsrGraph(void) {}

    /*! Destruct a \p ValuedCsrGraph. */
    ~ValuedCsrGraph(void) {}

    /*! Construct a \p ValuedCsrGraph with a specific shape and number of nonzero entries.
     *
     *  \param num_rows Number of rows.
     *  \param num_entries Number of nonzero graph entries.
     */
    ValuedCsrGraph(size_t num_rows, size_t num_entries, cudaStream_t stream)
        : Parent(num_rows, num_entries, stream),
          values(allocateDevice<ValueType>(num_entries, NULL))
    {}

    /*! Construct a \p ValuedCsrGraph from another graph.
     *
     *  \param ValuedCsrGraph Another graph in csr
     */
    ValuedCsrGraph(const ValuedCsrGraph& gr)
        : Parent(gr), values(gr.values)
    {}

    /*! Construct a \p ValuedCsrGraph from another graph.
     *
     *  \param ValuedCsrGraph Another graph in csr
     */
    ValuedCsrGraph(const Parent& gr, Vector<ValueType>& vals)
        : Parent(gr), values(vals.raw())
    {}

    inline ValueType* get_raw_values() const { return values.get(); }

    /*! Swap the contents of two \p ValuedCsrGraph objects.
     *
     *  \param graph Another graph in csr
     */
    void swap(ValuedCsrGraph& graph);

    /*! Assignment from another graph.
     *
     *  \param graph Another graph in csr
     */
    ValuedCsrGraph& operator=(const ValuedCsrGraph& graph);

    //Accept method injection
    DEFINE_VISITABLE(IndexType_)

}; // class ValuedCsrGraph

}
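As a minimal usage sketch (not from the original sources; the sizes, stream, and solver hand-off are placeholders), a weighted view is typically assembled from an existing CSR topology plus one value per nonzero:

// Sketch: wrap a CSR topology and a device value array as a weighted graph.
void example_valued_csr(size_t n, size_t nnz, cudaStream_t stream)
{
  nvgraph::CsrGraph<int> topology(n, nnz, stream);   // row offsets + column indices
  nvgraph::Vector<float> weights(nnz, stream);       // one value per nonzero entry

  nvgraph::ValuedCsrGraph<int, float> G(topology, weights);
  float* d_vals = G.get_raw_values();                // device pointer to the weights
  (void) d_vals;                                     // ... hand G / d_vals to a solver
}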
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/graph_contracting_structs.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef GRAPH_CONTRACTING_STRUCTS_HXX #define GRAPH_CONTRACTING_STRUCTS_HXX #include <nvgraph_error.hxx> #include <multi_valued_csr_graph.hxx> //which includes all other headers... #include <range_view.hxx> // TODO: to be changed to thrust/range_view.h, when toolkit gets in sync with Thrust #include <thrust_traits.hxx> //from amgx/amg/base/include/sm_utils.inl //{ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) #define __PTR "l" #else #define __PTR "r" #endif //} namespace nvgraph { //from amgx/amg/base/include/sm_utils.inl //{ namespace utils { // ==================================================================================================================== // Warp tools. // ==================================================================================================================== static __device__ __forceinline__ int lane_id() { int id; asm( "mov.u32 %0, %%laneid;" : "=r"(id) ); return id; } static __device__ __forceinline__ int lane_mask_lt() { int mask; asm( "mov.u32 %0, %%lanemask_lt;" : "=r"(mask) ); return mask; } static __device__ __forceinline__ int warp_id() { return threadIdx.x >> 5; } // ==================================================================================================================== // Atomics. // ==================================================================================================================== static __device__ __forceinline__ void atomic_add( float *address, float value ) { atomicAdd( address, value ); } static __device__ __forceinline__ void atomic_add( double *address, double value ) { unsigned long long *address_as_ull = (unsigned long long *) address; unsigned long long old = __double_as_longlong( address[0] ), assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __double_as_longlong( value + __longlong_as_double( assumed ) ) ); } while( assumed != old ); } // ==================================================================================================================== // Bit tools. // ==================================================================================================================== static __device__ __forceinline__ int bfe( int src, int num_bits ) { unsigned mask; asm( "bfe.u32 %0, %1, 0, %2;" : "=r"(mask) : "r"(src), "r"(num_bits) ); return mask; } static __device__ __forceinline__ int bfind( int src ) { int msb; asm( "bfind.u32 %0, %1;" : "=r"(msb) : "r"(src) ); return msb; } static __device__ __forceinline__ int bfind( unsigned long long src ) { int msb; asm( "bfind.u64 %0, %1;" : "=r"(msb) : "l"(src) ); return msb; } // ==================================================================================================================== // Shuffle. 
// ==================================================================================================================== static __device__ __forceinline__ float shfl( float r, int lane, int bound = 32) { #if __CUDA_ARCH__ >= 300 return __shfl( r, lane, bound ); #else return 0.0f; #endif } static __device__ __forceinline__ double shfl( double r, int lane, int bound=32 ) { #if __CUDA_ARCH__ >= 300 int hi = __shfl( __double2hiint(r), lane, bound ); int lo = __shfl( __double2loint(r), lane, bound ); return __hiloint2double( hi, lo ); #else return 0.0; #endif } static __device__ __forceinline__ float shfl_xor( float r, int mask, int bound=32 ) { #if __CUDA_ARCH__ >= 300 return __shfl_xor( r, mask, bound ); #else return 0.0f; #endif } static __device__ __forceinline__ double shfl_xor( double r, int mask, int bound=32 ) { #if __CUDA_ARCH__ >= 300 int hi = __shfl_xor( __double2hiint(r), mask, bound ); int lo = __shfl_xor( __double2loint(r), mask, bound ); return __hiloint2double( hi, lo ); #else return 0.0; #endif } // ==================================================================================================================== // Loads. // ==================================================================================================================== enum Ld_mode { LD_AUTO = 0, LD_CA, LD_CG, LD_TEX, LD_NC }; template< Ld_mode Mode > struct Ld {}; template<> struct Ld<LD_AUTO> { template< typename T > static __device__ __forceinline__ T load( const T *ptr ) { return *ptr; } }; template<> struct Ld<LD_CG> { static __device__ __forceinline__ int load( const int *ptr ) { int ret; asm volatile ( "ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __PTR(ptr) ); return ret; } static __device__ __forceinline__ float load( const float *ptr ) { float ret; asm volatile ( "ld.global.cg.f32 %0, [%1];" : "=f"(ret) : __PTR(ptr) ); return ret; } static __device__ __forceinline__ double load( const double *ptr ) { double ret; asm volatile ( "ld.global.cg.f64 %0, [%1];" : "=d"(ret) : __PTR(ptr) ); return ret; } }; template<> struct Ld<LD_CA> { static __device__ __forceinline__ int load( const int *ptr ) { int ret; asm volatile ( "ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __PTR(ptr) ); return ret; } static __device__ __forceinline__ float load( const float *ptr ) { float ret; asm volatile ( "ld.global.ca.f32 %0, [%1];" : "=f"(ret) : __PTR(ptr) ); return ret; } static __device__ __forceinline__ double load( const double *ptr ) { double ret; asm volatile ( "ld.global.ca.f64 %0, [%1];" : "=d"(ret) : __PTR(ptr) ); return ret; } }; template<> struct Ld<LD_NC> { template< typename T > static __device__ __forceinline__ T load( const T *ptr ) { return __ldg( ptr ); } }; template < typename T, typename POD_TYPE = T > struct util; template <> struct util <float, float > { typedef double uptype; typedef float downtype; static const bool is_real = true; static const bool is_complex = false; static __host__ __device__ __inline__ float get_zero(){ return 0.f; } static __host__ __device__ __inline__ float get_one(){ return 1.f; } static __host__ __device__ __inline__ float get_minus_one(){ return -1.f; } // exact comaprison, which might result wrong answer in a lot of cases static __host__ __device__ __inline__ bool is_zero(const float& val){ return val == get_zero(); } static __host__ __device__ __inline__ bool is_equal(const float& val1, const float& val2) { return val1 == val2;} ; static __host__ __device__ __inline__ float invert(const float& val) {return -val;} static __host__ __device__ __inline__ float conjugate(const 
float& val) {return val;} static __host__ __device__ __inline__ void invert_inplace(float& val) {val = -val;} static __host__ __device__ __inline__ void conjugate_inplace(float& val) {} static __host__ __device__ __inline__ float abs (const float& val) { return fabs(val); } template <typename V> static __host__ __device__ __inline__ void to_uptype (const float& src, V& dst) { dst = (V)(src); } static __host__ __device__ __inline__ float to_downtype (const float& src) { return src; } static __host__ __device__ __inline__ float volcast (const volatile float& val) {return val;} static __host__ __device__ __inline__ void volcast (const float& val, volatile float* ret) {*ret = val;} /*template <typename M> static __host__ __device__ __inline__ float mul(const float& val, const M& mult) { static_assert(util<M>::is_real(), "Multiply is supported for real constant only"); return val*mult; }*/ static void printf(const char* fmt, const float& val) { ::printf(fmt, val); } static void fprintf(FILE* f, const char* fmt, const float& val) { ::fprintf(f, fmt, val); } }; template <> struct util <double, double> { typedef double uptype; typedef float downtype; static const bool is_real = true; static const bool is_complex = false; static __host__ __device__ __inline__ double get_zero(){ return 0.; } static __host__ __device__ __inline__ double get_one(){ return 1.; } static __host__ __device__ __inline__ double get_minus_one(){ return -1.; } static __host__ __device__ __inline__ bool is_zero(const double& val){ return val == get_zero(); } static __host__ __device__ __inline__ bool is_equal(const double& val1, double& val2) { return val1 == val2;} ; static __host__ __device__ __inline__ double invert(const double& val) {return -val;} static __host__ __device__ __inline__ double conjugate(const double& val) {return val;} static __host__ __device__ __inline__ void invert_inplace(double& val) {val = -val;} static __host__ __device__ __inline__ void conjugate_inplace(double& val) {} static __host__ __device__ __inline__ double abs (const double& val) { return fabs(val); } template <typename V> static __host__ __device__ __inline__ void to_uptype (const float& src, V& dst) { dst = (V)(src); } static __host__ __device__ __inline__ float to_downtype (const float& src) { return (float)src; } static __host__ __device__ __inline__ double volcast (const volatile double& val) {return val;} static __host__ __device__ __inline__ void volcast (const double& val, volatile double* ret) {*ret = val;} /* template <typename M> static __host__ __device__ __inline__ double mulf(const double& val, const M& mult) { static_assert(util<M>::is_real(), "Multiply is supported for real constant only"); return val*mult; }*/ static void printf(const char* fmt, const double& val) { ::printf(fmt, val); } static void fprintf(FILE* f, const char* fmt,const double& val) { ::fprintf(f, fmt, val); } }; // ==================================================================================================================== // Warp-level reductions. 
// ==================================================================================================================== struct Add { template< typename Value_type > static __device__ __forceinline__ Value_type eval( Value_type x, Value_type y ) { return x+y; } }; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 template< int NUM_THREADS_PER_ITEM, int WARP_SIZE > struct Warp_reduce_pow2 { template< typename Operator, typename Value_type > static __device__ __inline__ Value_type execute( Value_type x ) { #pragma unroll for( int mask = WARP_SIZE / 2 ; mask >= NUM_THREADS_PER_ITEM ; mask >>= 1 ) x = Operator::eval( x, shfl_xor(x, mask) ); return x; } }; template< int NUM_THREADS_PER_ITEM, int WARP_SIZE > struct Warp_reduce_linear { template< typename Operator, typename Value_type > static __device__ __inline__ Value_type execute( Value_type x ) { const int NUM_STEPS = WARP_SIZE / NUM_THREADS_PER_ITEM; int my_lane_id = utils::lane_id(); #pragma unroll for( int i = 1 ; i < NUM_STEPS ; ++i ) { Value_type y = shfl_down( x, i*NUM_THREADS_PER_ITEM ); if( my_lane_id < NUM_THREADS_PER_ITEM ) x = Operator::eval( x, y ); } return x; } }; #else template< int NUM_THREADS_PER_ITEM, int WARP_SIZE > struct Warp_reduce_pow2 { template< typename Operator, typename Value_type > static __device__ __inline__ Value_type execute( volatile Value_type *smem, Value_type x ) { int my_lane_id = utils::lane_id(); #pragma unroll for( int offset = WARP_SIZE / 2 ; offset >= NUM_THREADS_PER_ITEM ; offset >>= 1 ) if( my_lane_id < offset ) { x = Operator::eval( x, smem[threadIdx.x+offset] ); util<Value_type>::volcast(x, smem + threadIdx.x); } return x; } }; template< int NUM_THREADS_PER_ITEM, int WARP_SIZE > struct Warp_reduce_linear { template< typename Operator, typename Value_type > static __device__ __inline__ Value_type execute( volatile Value_type *smem, Value_type x ) { const int NUM_STEPS = WARP_SIZE / NUM_THREADS_PER_ITEM; int my_lane_id = utils::lane_id(); #pragma unroll for( int i = 1 ; i < NUM_STEPS ; ++i ) if( my_lane_id < NUM_THREADS_PER_ITEM ) { x = Operator::eval( x, smem[threadIdx.x+i*NUM_THREADS_PER_ITEM] ); util<Value_type>::volcast(x, smem + threadIdx.x); } return x; } }; #endif // ==================================================================================================================== template< int NUM_THREADS_PER_ITEM, int WARP_SIZE = 32 > struct Warp_reduce : public Warp_reduce_pow2<NUM_THREADS_PER_ITEM, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce< 3, WARP_SIZE> : public Warp_reduce_linear< 3, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce< 4, WARP_SIZE> : public Warp_reduce_linear< 4, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce< 5, WARP_SIZE> : public Warp_reduce_linear< 5, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce< 6, WARP_SIZE> : public Warp_reduce_linear< 6, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce< 7, WARP_SIZE> : public Warp_reduce_linear< 7, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce< 9, WARP_SIZE> : public Warp_reduce_linear< 9, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce<10, WARP_SIZE> : public Warp_reduce_linear<10, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce<11, WARP_SIZE> : public Warp_reduce_linear<11, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce<12, WARP_SIZE> : public Warp_reduce_linear<12, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce<13, WARP_SIZE> : public Warp_reduce_linear<13, WARP_SIZE> {}; template< int WARP_SIZE 
> struct Warp_reduce<14, WARP_SIZE> : public Warp_reduce_linear<14, WARP_SIZE> {}; template< int WARP_SIZE > struct Warp_reduce<15, WARP_SIZE> : public Warp_reduce_linear<15, WARP_SIZE> {}; // ==================================================================================================================== #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 template< int NUM_THREADS_PER_ITEM, typename Operator, typename Value_type > static __device__ __forceinline__ Value_type warp_reduce( Value_type x ) { return Warp_reduce<NUM_THREADS_PER_ITEM>::template execute<Operator>( x ); } #else template< int NUM_THREADS_PER_ITEM, typename Operator, typename Value_type > static __device__ __forceinline__ Value_type warp_reduce( volatile Value_type *smem, Value_type x ) { return Warp_reduce<NUM_THREADS_PER_ITEM>::template execute<Operator>( smem, x ); } template< int NUM_THREADS_PER_ITEM, typename Value_type, int WARP_SIZE > static __device__ __forceinline__ Value_type warp_reduce_sum(volatile Value_type *smem, Value_type x) { const int NUM_STEPS = WARP_SIZE / NUM_THREADS_PER_ITEM; int my_lane_id = utils::lane_id(); #pragma unroll for (int i = 1; i < NUM_STEPS; ++i) if (my_lane_id < NUM_THREADS_PER_ITEM) { x = x + util<Value_type>::volcast(smem[threadIdx.x + i*NUM_THREADS_PER_ITEM]); util<Value_type>::volcast(x, smem + threadIdx.x); } return x; } #endif }//namespace utils //} template< typename Key_type, int SMEM_SIZE=128, int WARP_SIZE=32 > class Hash_index { public: // The number of registers needed to store the index. enum { REGS_SIZE = SMEM_SIZE / WARP_SIZE }; //private: // The partial sums of the index (stored in registers). int m_partial[REGS_SIZE]; // The index in GMEM. int *m_gmem; public: // Create an index (to be associated with a hash set). __device__ __forceinline__ Hash_index( int *gmem ) : m_gmem(gmem) {} // Build the index from a SMEM buffer of size SMEM_SIZE. __device__ __forceinline__ void build_smem_index( const volatile Key_type *s_buffer ); // Given an offset in SMEM, it finds the index. __device__ __forceinline__ int find_smem( int offset ) const; // Given an offset in GMEM, it finds the index. __device__ __forceinline__ int find_gmem( int offset ) const; // Set an indexed item in GMEM. 
__device__ __forceinline__ void set_gmem_index( int offset, int val ) { m_gmem[offset] = val; } }; // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int WARP_SIZE > __device__ __forceinline__ void Hash_index<Key_type, SMEM_SIZE, WARP_SIZE>::build_smem_index( const volatile Key_type *s_buffer ) { const int lane_id = utils::lane_id(); #pragma unroll for( int i = 0, offset = lane_id ; i < REGS_SIZE ; ++i, offset += WARP_SIZE ) m_partial[i] = __ballot( s_buffer[offset] != -1 ); } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int WARP_SIZE > __device__ __forceinline__ int Hash_index<Key_type, SMEM_SIZE, WARP_SIZE>::find_smem( int offset ) const { const int offset_div_warp_size = offset / WARP_SIZE; const int offset_mod_warp_size = offset % WARP_SIZE; int result = 0; #pragma unroll for( int i = 0 ; i < REGS_SIZE ; ++i ) { int mask = 0xffffffff; if( i == offset_div_warp_size ) mask = (1 << offset_mod_warp_size) - 1; if( i <= offset_div_warp_size ) result += __popc( m_partial[i] & mask ); } return result; } template< typename Key_type, int SMEM_SIZE, int WARP_SIZE > __device__ __forceinline__ int Hash_index<Key_type, SMEM_SIZE, WARP_SIZE>::find_gmem( int offset ) const { return m_gmem[offset]; } static __constant__ unsigned c_hash_keys[] = { 3499211612, 581869302, 3890346734, 3586334585, 545404204, 4161255391, 3922919429, 949333985, 2715962298, 1323567403, 418932835, 2350294565, 1196140740, 809094426, 2348838239, 4264392720 }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Key_type, int SMEM_SIZE=128, int NUM_HASH_FCTS=4, int WARP_SIZE=32 > class Hash_set { // Associated index. typedef Hash_index<Key_type, SMEM_SIZE, WARP_SIZE> Index; protected: // The size of the table (occupancy). int m_smem_count, m_gmem_count; // The keys stored in the hash table. volatile Key_type *m_smem_keys, *m_gmem_keys; // The size of the global memory buffer. const int m_gmem_size; // Is it ok? bool m_fail; // DEBUG // bool m_print; // END OF DEBUG. public: // Constructor. __device__ __forceinline__ Hash_set( volatile Key_type *smem_keys, volatile Key_type *gmem_keys, int gmem_size ) : m_smem_count(0), m_gmem_count(1), m_smem_keys (smem_keys), m_gmem_keys (gmem_keys), m_gmem_size (gmem_size), m_fail (false) // DEBUG // , m_print(true) // END OF DEBUG {} // Clear the table. __device__ __forceinline__ void clear( bool skip_gmem = false ); // Compute the size of the table. Only thread with lane_id==0 gives the correct result (no broadcast of the value). __device__ __forceinline__ int compute_size(); // Compute the size of the table. Only thread with lane_id==0 gives the correct result (no broadcast of the value). __device__ __forceinline__ int compute_size_with_duplicates(); // Does the set contain those values? __device__ __forceinline__ bool contains( Key_type key ) const; // Find an index. __device__ __forceinline__ int find_index( Key_type key, const Index &index, bool print_debug ) const; // Has the process failed. __device__ __forceinline__ bool has_failed() const { return m_fail; } // Insert a key inside the set. If status is NULL, ignore failure. __device__ __forceinline__ void insert( Key_type key, int *status ); // Load a set. 
__device__ __forceinline__ void load( int count, const Key_type *keys, const int *pos ); // Load a set and use it as an index. __device__ __forceinline__ void load_index( int count, const Key_type *keys, const int *pos, Index &index, bool print_debug ); // Store a set. __device__ __forceinline__ void store( int count, Key_type *keys ); // Store a set. __device__ __forceinline__ int store_with_positions( Key_type *keys, int *pos ); // Store a set. __device__ __forceinline__ int store( Key_type *keys ); }; // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE> __device__ __forceinline__ void Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::clear( bool skip_gmem ) { int lane_id = utils::lane_id(); const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) m_smem_keys[i_step*WARP_SIZE + lane_id] = -1; m_smem_count = 0; if( skip_gmem || m_gmem_count == 0 ) { m_gmem_count = 0; return; } #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) m_gmem_keys[offset] = -1; m_gmem_count = 0; } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE> __device__ __forceinline__ int Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::compute_size() { m_smem_count += m_gmem_count; #pragma unroll for( int offset = WARP_SIZE/2 ; offset > 0 ; offset >>= 1 ) m_smem_count += __shfl_xor( m_smem_count, offset ); m_gmem_count = __any( m_gmem_count > 0 ); return m_smem_count; } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE> __device__ __forceinline__ int Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::compute_size_with_duplicates() { int lane_id = utils::lane_id(); // Count the number of keys in SMEM. int sum = 0; const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) { const int offset = i_step*WARP_SIZE + lane_id; Key_type key = m_smem_keys[offset]; sum += __popc( __ballot( key != -1 ) ); } // Is there any key in GMEM. If not, just quit. m_gmem_count = __any(m_gmem_count > 0); if( !m_gmem_count ) return sum; // Count the number of keys in GMEM. 
#pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { Key_type key = m_gmem_keys[offset]; sum += __popc( __ballot( key != -1 ) ); } return sum; } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE> __device__ __forceinline__ bool Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::contains( Key_type key ) const { bool done = key == -1, found = false; #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return found; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = ( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash] ) & (SMEM_SIZE-1); if( !done ) { Key_type stored_key = m_smem_keys[hash]; if( stored_key == key ) found = true; if( found || stored_key == -1 ) done = true; } } const int num_bits = utils::bfind( m_gmem_size ); // TODO: move it outside ::insert. #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return found; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = utils::bfe( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash], num_bits ); if( !done ) { Key_type stored_key = m_gmem_keys[hash]; if( stored_key == key ) found = true; if( found || stored_key == -1 ) done = true; } } return found; } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ int Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::find_index( Key_type key, const Index &index, bool print_debug ) const { int idx = -1; bool done = key == -1; #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return idx; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = ( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash] ) & (SMEM_SIZE-1); int result = index.find_smem(hash); if( !done ) { Key_type stored_key = m_smem_keys[hash]; if( stored_key == key ) { idx = result; done = true; } } } const int num_bits = utils::bfind( m_gmem_size ); // TODO: move it outside ::insert. 
#pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return idx; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = utils::bfe( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash], num_bits ); if( !done ) { Key_type stored_key = m_gmem_keys[hash]; if( stored_key == key ) { idx = index.find_gmem(hash); done = true; } } } // if( key != -1 && idx == -1 ) // printf( "ERROR: Couldn't find the index!!!!\n"); return idx; } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::insert( Key_type key, int *status ) { bool done = key == -1; #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return; bool candidate = false; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = ( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash] ) & (SMEM_SIZE-1); if( !done ) { Key_type stored_key = m_smem_keys[hash]; if( stored_key == key ) done = true; candidate = stored_key == -1; if( candidate ) m_smem_keys[hash] = key; if( candidate && key == m_smem_keys[hash] ) // More than one candidate may have written to that slot. { m_smem_count++; done = true; } } } const int num_bits = utils::bfind( m_gmem_size ); // TODO: move it outside ::insert. #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return; bool candidate = false; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = utils::bfe( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash], num_bits ); if( !done ) { Key_type stored_key = m_gmem_keys[hash]; if( stored_key == key ) done = true; candidate = stored_key == -1; if( candidate ) m_gmem_keys[hash] = key; if( candidate && key == m_gmem_keys[hash] ) // More than one candidate may have written to that slot. { m_gmem_count++; done = true; } } } if( __all(done) ) return; assert( status != NULL ); if( utils::lane_id() == 0 ) *status = 1; m_fail = true; } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::load( int count, const Key_type *keys, const int *pos ) { int lane_id = utils::lane_id(); #pragma unroll 4 for( int offset = lane_id ; offset < count ; offset += WARP_SIZE ) { Key_type key = keys[offset]; int idx = pos [offset]; // Where to store the item. volatile Key_type *ptr = m_smem_keys; if( idx >= SMEM_SIZE ) { ptr = m_gmem_keys; m_gmem_count = 1; idx -= SMEM_SIZE; } // Store the item. ptr[idx] = key; } m_gmem_count = __any( m_gmem_count ); } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::load_index( int count, const Key_type *keys, const int *pos, Index &index, bool print_debug ) { #pragma unroll 4 for( int offset = utils::lane_id() ; offset < count ; offset += WARP_SIZE ) { Key_type key = keys[offset]; int idx = pos [offset]; // Store the item. 
volatile Key_type *ptr = m_smem_keys; if( idx >= SMEM_SIZE ) { ptr = m_gmem_keys; m_gmem_count = 1; idx -= SMEM_SIZE; index.set_gmem_index( idx, offset ); } // Store the item. ptr[idx] = key; } // Build the local index. index.build_smem_index( m_smem_keys ); m_gmem_count = __any( m_gmem_count ); } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::store( int count, Key_type *keys ) { int lane_id = utils::lane_id(); int lane_mask_lt = utils::lane_mask_lt(); int warp_offset = 0; const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) { const int offset = i_step*WARP_SIZE + lane_id; Key_type key = m_smem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) keys[dst_offset] = key; warp_offset += __popc( poll ); } m_gmem_count = __any( m_gmem_count > 0 ); if( !m_gmem_count ) return; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { Key_type key = m_gmem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) keys[dst_offset] = key; warp_offset += __popc( poll ); } } // ==================================================================================================================== template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ int Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::store_with_positions( Key_type *keys, int *pos ) { int lane_id = utils::lane_id(); int lane_mask_lt = utils::lane_mask_lt(); int warp_offset = 0; const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) { const int offset = i_step*WARP_SIZE + lane_id; Key_type key = m_smem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = key; pos [dst_offset] = offset; } warp_offset += __popc( poll ); } m_gmem_count = __any( m_gmem_count > 0 ); if( !m_gmem_count ) return warp_offset; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { Key_type key = m_gmem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = key; pos [dst_offset] = SMEM_SIZE + offset; } warp_offset += __popc( poll ); } return warp_offset; } template< typename Key_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ int Hash_set<Key_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::store( Key_type *keys ) { int lane_id = utils::lane_id(); int lane_mask_lt = utils::lane_mask_lt(); int warp_offset = 0; const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) { const int offset = i_step*WARP_SIZE + lane_id; Key_type key = m_smem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = key; } warp_offset += __popc( poll ); } m_gmem_count = __any( 
m_gmem_count > 0 ); if( !m_gmem_count ) return warp_offset; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { Key_type key = m_gmem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = key; } warp_offset += __popc( poll ); } return warp_offset; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// union Word { char b8[4]; int b32; }; // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE=128, int NUM_HASH_FCTS=4, int WARP_SIZE=32 > class Hash_map { protected: // The keys stored in the map. volatile Key_type *m_smem_keys, *m_gmem_keys; // Vote buffer for values. volatile Word *m_smem_vote; // Registers to store values. T m_regs_vals[4]; // The values stored in the map. T *m_gmem_vals; // The size of the global memory buffer. const int m_gmem_size; // Is there any value in GMEM. bool m_any_gmem; public: // Constructor. __device__ __forceinline__ Hash_map( volatile Key_type *smem_keys, volatile Key_type *gmem_keys, volatile Word *smem_vote, T *gmem_vals, int gmem_size ) : m_smem_keys(smem_keys), m_gmem_keys(gmem_keys), m_smem_vote(smem_vote), m_gmem_vals(gmem_vals), m_gmem_size(gmem_size), m_any_gmem (true) {} // Clear the table. It doesn't clear GMEM values. __device__ __forceinline__ void clear(); // Clear the table. It also clears GMEM values (set them to 0). __device__ __forceinline__ void clear_all(); // Insert a key/value inside the hash table. __device__ __forceinline__ void insert( Key_type key, T a_value, T b_value, int *status ); // Insert a key/value inside the hash table. __device__ __forceinline__ void insert_with_duplicates( Key_type key, T val, int *status ); // Load a set. __device__ __forceinline__ void load( int count, const Key_type *keys, const int *pos ); // Store the map. __device__ __forceinline__ void store( int count, T *vals ); // Store the map. __device__ __forceinline__ void store( int count, Key_type *keys, T *vals ); // Store the map. __device__ __forceinline__ void store_map_keys_scale_values( int count, const int *map, Key_type *keys, T alpha, T *vals ); // Store the map. __device__ __forceinline__ void store_keys_scale_values( int count, Key_type *keys, T alpha, T *vals ); // Update a value in the table but do not insert if it doesn't exist. __device__ __forceinline__ bool update( Key_type key, T value ); protected: // Get the selected item in the register buffer. __device__ __forceinline__ int get_selected( int hash ) const { return static_cast<int>(m_smem_vote[hash%WARP_SIZE].b8[hash/WARP_SIZE]); } // Is it the selected item in the register buffer. __device__ __forceinline__ bool is_selected( int hash, int lane_id ) const { return m_smem_vote[hash%WARP_SIZE].b8[hash/WARP_SIZE] == reinterpret_cast<char&>(lane_id); } // Push my ID in the register buffer. 
__device__ __forceinline__ void try_selection( int hash, int lane_id ) { m_smem_vote[hash%WARP_SIZE].b8[hash/WARP_SIZE] = reinterpret_cast<char&>(lane_id); } }; // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::clear() { int lane_id = utils::lane_id(); const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) m_smem_keys[i_step*WARP_SIZE + lane_id] = -1; #pragma unroll for( int i_regs = 0 ; i_regs < 4 ; ++i_regs ) m_regs_vals[i_regs] = T(0); if( !m_any_gmem ) return; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) m_gmem_keys[offset] = -1; m_any_gmem = false; } // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::clear_all() { int lane_id = utils::lane_id(); const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) m_smem_keys[i_step*WARP_SIZE + lane_id] = -1; #pragma unroll for( int i_regs = 0 ; i_regs < 4 ; ++i_regs ) m_regs_vals[i_regs] = T(0); if( !m_any_gmem ) return; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { m_gmem_keys[offset] = -1; m_gmem_vals[offset] = T(0); } m_any_gmem = false; } // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::insert( Key_type key, T a_value, T b_value, int *status ) { const int lane_id = utils::lane_id(); bool done = key == -1; m_smem_vote[lane_id].b32 = 0x20202020; #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( i_hash > 0 && __all(done) ) break; bool candidate = false; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = ( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash] ) & (SMEM_SIZE-1); if( !done ) { Key_type stored_key = m_smem_keys[hash]; if( stored_key == key ) { this->try_selection( hash, lane_id ); done = true; } candidate = stored_key == -1; if( candidate ) m_smem_keys[hash] = key; if( candidate && key == m_smem_keys[hash] ) { this->try_selection( hash, lane_id ); done = true; } } } Word my_vote; my_vote.b32 = m_smem_vote[lane_id].b32; #pragma unroll for( int i_regs = 0 ; i_regs < 4 ; ++i_regs ) { int my_src = my_vote.b8[i_regs]; T other_val = utils::shfl( b_value, my_src ); if( my_src != WARP_SIZE ) m_regs_vals[i_regs] = m_regs_vals[i_regs] + a_value * other_val; } const int num_bits = utils::bfind( m_gmem_size ); // TODO: move it outside ::insert. 
#pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return; m_any_gmem = true; bool candidate = false; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = utils::bfe( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash], num_bits ); if( !done ) { Key_type stored_key = m_gmem_keys[hash]; if( stored_key == key ) { m_gmem_vals[hash] = m_gmem_vals[hash] + a_value * b_value; done = true; } candidate = stored_key == -1; if( candidate ) m_gmem_keys[hash] = key; if( candidate && key == m_gmem_keys[hash] ) // More than one candidate may have written to that slot. { m_gmem_vals[hash] = a_value * b_value; done = true; } } } if( status == NULL || __all(done) ) return; if( lane_id == 0 ) status[0] = 1; } // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::insert_with_duplicates( Key_type key, T val, int *status ) { const int lane_id = utils::lane_id(); bool done = key == -1; m_smem_vote[lane_id].b32 = 0x20202020; #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) break; bool candidate = false; bool maybe_in_conflict = false; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = ( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash] ) & (SMEM_SIZE-1); if( !done ) { Key_type stored_key = m_smem_keys[hash]; if( stored_key == key ) { this->try_selection( hash, lane_id ); maybe_in_conflict = true; done = true; // Is it really done??? } candidate = stored_key == -1; if( candidate ) m_smem_keys[hash] = key; if( candidate && key == m_smem_keys[hash] ) { this->try_selection( hash, lane_id ); maybe_in_conflict = true; done = true; } } // Fix conflicts. bool in_conflict = maybe_in_conflict && !this->is_selected(hash, lane_id); while( __any( in_conflict ) ) { int winner = in_conflict ? this->get_selected(hash) : WARP_SIZE; T other_val = utils::shfl( val, winner ); if( in_conflict ) this->try_selection(hash, lane_id); if( in_conflict && this->is_selected(hash, lane_id) ) { val = val + other_val; in_conflict = false; } } } Word my_vote; my_vote.b32 = m_smem_vote[lane_id].b32; #pragma unroll for( int i_regs = 0 ; i_regs < 4 ; ++i_regs ) { int my_src = my_vote.b8[i_regs]; T other_val = utils::shfl( val, my_src ); if( my_src != WARP_SIZE ) m_regs_vals[i_regs] = m_regs_vals[i_regs] + other_val; } const int num_bits = utils::bfind( m_gmem_size ); // TODO: move it outside ::insert. #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return; m_any_gmem = true; bool candidate = false; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = utils::bfe( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash], num_bits ); if( !done ) { Key_type stored_key = m_gmem_keys[hash]; if( stored_key == key ) { utils::atomic_add( &m_gmem_vals[hash], val ); done = true; } candidate = stored_key == -1; if( candidate ) m_gmem_keys[hash] = key; if( candidate && key == m_gmem_keys[hash] ) // More than one candidate may have written to that slot. 
{ utils::atomic_add( &m_gmem_vals[hash], val ); done = true; } } } if( status == NULL || __all(done) ) return; if( lane_id == 0 ) status[0] = 1; } // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::load( int count, const Key_type *keys, const int *pos ) { int lane_id = utils::lane_id(); #pragma unroll 4 for( int offset = lane_id ; offset < count ; offset += WARP_SIZE ) { Key_type key = keys[offset]; int idx = pos [offset]; // Where to store the item. volatile Key_type *ptr = m_smem_keys; if( idx >= SMEM_SIZE ) { ptr = m_gmem_keys; m_any_gmem = 1; idx -= SMEM_SIZE; m_gmem_vals[idx] = T(0); } // Store the item. ptr[idx] = key; } m_any_gmem = __any( m_any_gmem ); } // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::store( int count, T *vals ) { int lane_id = utils::lane_id(); int lane_mask_lt = utils::lane_mask_lt(); int warp_offset = 0; const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) { const int offset = i_step*WARP_SIZE + lane_id; Key_type key = m_smem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) vals[dst_offset] = m_regs_vals[i_step]; warp_offset += __popc( poll ); } if( !m_any_gmem ) return; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { Key_type key = m_gmem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) vals[dst_offset] = m_gmem_vals[offset]; warp_offset += __popc( poll ); } } // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::store( int count, Key_type *keys, T *vals ) { int lane_id = utils::lane_id(); int lane_mask_lt = utils::lane_mask_lt(); int warp_offset = 0; const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) { const int offset = i_step*WARP_SIZE + lane_id; Key_type key = m_smem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = key; vals[dst_offset] = m_regs_vals[i_step]; } warp_offset += __popc( poll ); } if( !m_any_gmem ) return; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { Key_type key = m_gmem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = key; vals[dst_offset] = m_gmem_vals[offset]; } warp_offset += __popc( poll ); } } // ==================================================================================================================== template< typename Key_type, 
typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::store_map_keys_scale_values( int count, const int *map, Key_type *keys, T alpha, T *vals ) { int lane_id = utils::lane_id(); int lane_mask_lt = utils::lane_mask_lt(); int warp_offset = 0; const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) { const int offset = i_step*WARP_SIZE + lane_id; Key_type key = m_smem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = map[key]; vals[dst_offset] = alpha*m_regs_vals[i_step]; } warp_offset += __popc( poll ); } if( !m_any_gmem ) return; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { Key_type key = m_gmem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = map[key]; vals[dst_offset] = alpha*m_gmem_vals[offset]; } warp_offset += __popc( poll ); } } template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ void Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::store_keys_scale_values( int count, Key_type *keys, T alpha, T *vals ) { int lane_id = utils::lane_id(); int lane_mask_lt = utils::lane_mask_lt(); int warp_offset = 0; const int NUM_STEPS = SMEM_SIZE / WARP_SIZE; #pragma unroll for( int i_step = 0 ; i_step < NUM_STEPS ; ++i_step ) { const int offset = i_step*WARP_SIZE + lane_id; Key_type key = m_smem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = key; vals[dst_offset] = alpha*m_regs_vals[i_step]; } warp_offset += __popc( poll ); } if( !m_any_gmem ) return; #pragma unroll 4 for( int offset = lane_id ; offset < m_gmem_size ; offset += WARP_SIZE ) { Key_type key = m_gmem_keys[offset]; int poll = __ballot( key != -1 ); if( poll == 0 ) continue; int dst_offset = warp_offset + __popc( poll & lane_mask_lt ); if( key != -1 ) { keys[dst_offset] = key; vals[dst_offset] = alpha*m_gmem_vals[offset]; } warp_offset += __popc( poll ); } } // ==================================================================================================================== template< typename Key_type, typename T, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE > __device__ __forceinline__ bool Hash_map<Key_type, T, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE>::update( Key_type key, T val ) { const int lane_id = utils::lane_id(); bool done = key == -1, found = false; m_smem_vote[lane_id].b32 = 0x20202020; #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( i_hash > 0 && __all(done) ) break; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = ( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash] ) & (SMEM_SIZE-1); if( !done ) { Key_type stored_key = m_smem_keys[hash]; if( stored_key == key ) { this->try_selection( hash, lane_id ); found = true; } done = found || stored_key == -1; } } Word my_vote; my_vote.b32 = m_smem_vote[lane_id].b32; #pragma unroll for( int i_regs = 0 ; i_regs < 4 ; ++i_regs ) { int my_src = my_vote.b8[i_regs]; T other_val = utils::shfl( val, my_src ); if( my_src != WARP_SIZE ) m_regs_vals[i_regs] += other_val; } const int num_bits 
= utils::bfind( m_gmem_size ); // TODO: move it outside ::insert. #pragma unroll for( int i_hash = 0 ; i_hash < NUM_HASH_FCTS ; ++i_hash ) { if( __all(done) ) return found; unsigned ukey = reinterpret_cast<unsigned&>( key ); int hash = utils::bfe( (ukey ^ c_hash_keys[i_hash]) + c_hash_keys[NUM_HASH_FCTS + i_hash], num_bits ); if( !done ) { Key_type stored_key = m_gmem_keys[hash]; if( stored_key == key ) { m_gmem_vals[hash] += val; found = true; } done = found || stored_key == -1; } } return found; } template<typename IndexT, typename Value_type, typename Key_type=IndexT> class Hash_Workspace { private: // Do we need values on the GPU? bool m_allocate_vals; // Constant parameters. const size_t m_grid_size, m_max_warp_count; // The number of threads per row of B. size_t m_num_threads_per_row_count, m_num_threads_per_row_compute; // The size of the GMEM buffers (number of elements). size_t m_gmem_size; // The status: OK if count_non_zeroes succeeded, FAILED otherwise. SHARED_PREFIX::shared_ptr<IndexT> m_status; // The work queue for dynamic load balancing in the kernels. SHARED_PREFIX::shared_ptr<IndexT> m_work_queue; // The buffer to store keys in GMEM. SHARED_PREFIX::shared_ptr<Key_type> m_keys; // The buffer to store values in GMEM. SHARED_PREFIX::shared_ptr<Value_type> m_vals; public: // Create a workspace. Hash_Workspace( bool allocate_vals = true, size_t grid_size = 128, size_t max_warp_count = 8, size_t gmem_size = 2048 ): m_allocate_vals(allocate_vals), m_grid_size(grid_size), m_max_warp_count(max_warp_count), m_num_threads_per_row_count(32), m_num_threads_per_row_compute(32), m_gmem_size(gmem_size), m_status(allocateDevice<IndexT>(1, NULL)), m_work_queue(allocateDevice<IndexT>(1, NULL)) { allocate_workspace(); } // Release memory used by the workspace. virtual ~Hash_Workspace() { //purposely empty... } // Get the size of GMEM. size_t get_gmem_size() const { return m_gmem_size; } // Get the status flag. IndexT* get_status() const { return m_status.get(); } // Get the work queue. IndexT* get_work_queue() const { return m_work_queue.get(); } // Get the keys. Key_type* get_keys() const { return m_keys.get(); } // Get the values. Value_type* get_vals() const { return m_vals.get(); } // Expand the workspace. void expand() { m_gmem_size *= 2; allocate_workspace(); } // Define the number of threads per row of B. void set_num_threads_per_row_count( size_t val ) { m_num_threads_per_row_count = val; } // Define the number of threads per row of B. void set_num_threads_per_row_compute( size_t val ) { m_num_threads_per_row_compute = val; } protected: // Allocate memory to store keys/vals in GMEM. virtual void allocate_workspace(void) { const size_t NUM_WARPS_IN_GRID = m_grid_size * m_max_warp_count; size_t sz = NUM_WARPS_IN_GRID*m_gmem_size*sizeof(Key_type); m_keys = allocateDevice<Key_type>(sz, NULL); if( m_allocate_vals ) { sz = NUM_WARPS_IN_GRID*m_gmem_size*sizeof(Value_type); m_vals = allocateDevice<Value_type>(sz, NULL); } } }; namespace{ //unnamed... 
static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 ) { #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ int offset = -1; if( utils::lane_id() == 0 ) offset = atomicAdd( queue, count ); return __shfl( offset, 0 ); #else return 0; #endif } enum { WARP_SIZE = 32, GRID_SIZE = 128, SMEM_SIZE = 128 }; template<size_t NUM_THREADS_PER_ROW, size_t CTA_SIZE, size_t SMEM_SIZE, size_t WARP_SIZE, bool HAS_DIAG, typename IndexT, typename Value_type> __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ __launch_bounds__( CTA_SIZE, 8 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 6 ) #endif void fill_A_kernel_1x1( const size_t R_num_rows, const IndexT *R_rows, const IndexT *R_cols, const IndexT *A_rows, const IndexT *A_cols, const IndexT *A_diag, const Value_type *A_vals, const IndexT *aggregates, const IndexT *Ac_rows, const IndexT *Ac_cols, const IndexT *Ac_pos, const IndexT *Ac_diag, Value_type *Ac_vals, size_t gmem_size, IndexT *g_keys, Value_type *g_vals, IndexT *wk_work_queue ) { const size_t NUM_WARPS = CTA_SIZE / WARP_SIZE; const size_t NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // The hash keys stored in shared memory. __shared__ volatile IndexT s_keys[NUM_WARPS*SMEM_SIZE]; #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ // The hash values stored in shared memory. __shared__ volatile Word s_vote[NUM_WARPS*SMEM_SIZE/4]; #else // Shared memory to vote. __shared__ volatile IndexT s_bcast_row[CTA_SIZE]; // The hash keys stored in shared memory. __shared__ Value_type s_vals[NUM_WARPS*SMEM_SIZE]; // Shared memory to acquire work. __shared__ volatile IndexT s_offsets[NUM_WARPS]; // Shared memory to reduce the diagonal. __shared__ volatile Value_type s_diag[CTA_SIZE]; #endif // The coordinates of the thread inside the CTA/warp. const IndexT warp_id = utils::warp_id(); const IndexT lane_id = utils::lane_id(); // Constants. const size_t lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const size_t lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... IndexT r_row_id = blockIdx.x*NUM_WARPS + warp_id; // Create local storage for the set. #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ Hash_map<IndexT, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id*SMEM_SIZE ], &g_keys[r_row_id*gmem_size ], &s_vote[warp_id*SMEM_SIZE/4], &g_vals[r_row_id*gmem_size ], gmem_size ); #else Hash_map<IndexT, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id*SMEM_SIZE ], &g_keys[r_row_id*gmem_size], &s_vals[warp_id*SMEM_SIZE ], &g_vals[r_row_id*gmem_size], gmem_size ); #endif // Loop over rows of A. #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ for( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) #else for( ; r_row_id < R_num_rows ; r_row_id = get_work( s_offsets, wk_work_queue, warp_id ) ) #endif { // The indices of the output row. IndexT ac_col_it = Ac_rows[r_row_id+0]; IndexT ac_col_end = Ac_rows[r_row_id+1]; // Clear the set first. TODO: Make sure it's needed. I don't think it is!!!! map.clear(); // Populate the map. map.load( ac_col_end-ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] ); // Load the range of the row. TODO: Make sure it helps. IndexT r_col_it = R_rows[r_row_id + 0]; IndexT r_col_end = R_rows[r_row_id + 1]; // The diagonal. Value_type r_diag(0); // _iterate over the columns of A to build C_hat. for( r_col_it += lane_id ; __any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE ) { // Is it an active thread. 
const bool is_active = r_col_it < r_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. IndexT a_row_id = -1; if( is_active ) a_row_id = R_cols[r_col_it]; #if __CUDA_ARCH__ < __CUDA_ARCH_THRESHOLD__ s_bcast_row[threadIdx.x] = a_row_id; #endif // Update the diagonal (if needed). if( HAS_DIAG && is_active ) r_diag = r_diag + A_vals[A_diag[a_row_id]]; const size_t num_rows = __popc( __ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for( IndexT k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { IndexT local_k = k+lane_id_div_num_threads; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ const IndexT uniform_a_row_id = __shfl( a_row_id, local_k ); #else IndexT uniform_a_row_id = -1; if( local_k < num_rows ) uniform_a_row_id = s_bcast_row[warp_id*WARP_SIZE + local_k]; #endif // The range of the row of B. IndexT a_col_it = 0, a_col_end = 0; if( local_k < num_rows ) { a_col_it = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 0] ); a_col_end = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 1] ); } // Iterate over the range of columns of B. for( a_col_it += lane_id_mod_num_threads ; __any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW ) { // Load columns and values. IndexT a_col_id = -1; Value_type a_value(Value_type(0)); if( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; a_value = A_vals[a_col_it]; } // Find the aggregate. IndexT a_agg_id = -1; if( a_col_it < a_col_end ) a_agg_id = aggregates[a_col_id]; // Update the diag/hash map. if( HAS_DIAG && a_agg_id == r_row_id ) { r_diag = r_diag + a_value; a_agg_id = -1; } map.insert_with_duplicates( a_agg_id, a_value, NULL ); // It won't insert. Only update. } } } // Update the diagonal. if( HAS_DIAG ) { #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ r_diag = utils::warp_reduce<1, utils::Add>( r_diag ); #else utils::util<Value_type>::volcast(r_diag, s_diag + threadIdx.x); #ifdef _MSC_VER r_diag = utils::warp_reduce_sum<1, Value_type, 32>(s_diag, r_diag); #else r_diag = utils::warp_reduce<1, utils::Add>(s_diag, r_diag); #endif #endif if( lane_id == 0 ) Ac_vals[Ac_diag[r_row_id]] = r_diag; } // Store the results. IndexT count = ac_col_end - ac_col_it; if( count == 0 ) continue; map.store( count, &Ac_vals[ac_col_it] ); } } template< size_t CTA_SIZE, typename Workspace, typename IndexT, typename Value_type> void fill_A_dispatch( Workspace &hash_wk, const size_t R_num_rows, // same as num_aggregates. const IndexT *R_rows, const IndexT *R_cols, const IndexT *A_rows, const IndexT *A_cols, const Value_type *A_vals, const IndexT *aggregates, const IndexT *Ac_rows, const IndexT *Ac_cols, const IndexT *Ac_pos, Value_type *Ac_vals ) { const size_t NUM_WARPS = CTA_SIZE / WARP_SIZE; cudaStream_t stream = 0; // for now... 
size_t work_offset = GRID_SIZE*NUM_WARPS; cudaMemcpyAsync( hash_wk.get_work_queue(), &work_offset, sizeof(IndexT), cudaMemcpyHostToDevice, stream ); cudaCheckError(); fill_A_kernel_1x1<8, CTA_SIZE, SMEM_SIZE, 32, false><<<GRID_SIZE, CTA_SIZE>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, static_cast<IndexT*>(0), A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, static_cast<IndexT*>(0), Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), hash_wk.get_vals(), hash_wk.get_work_queue() ); cudaCheckError(); } template<size_t NUM_THREADS_PER_ROW, size_t CTA_SIZE, size_t SMEM_SIZE, size_t WARP_SIZE, bool HAS_DIAG, bool COUNT_ONLY, typename IndexT> __global__ __launch_bounds__( CTA_SIZE ) void compute_sparsity_kernel( const size_t R_num_rows, // same as num_aggregates. const IndexT *R_rows, const IndexT *R_cols, const IndexT *A_rows, const IndexT *A_cols, const IndexT *aggregates, IndexT *Ac_rows, IndexT *Ac_cols, IndexT *Ac_pos, const size_t gmem_size, IndexT *g_keys, IndexT *wk_work_queue, IndexT *wk_status ) { const size_t NUM_WARPS = CTA_SIZE / WARP_SIZE; const size_t NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // The hash keys stored in shared memory. __shared__ IndexT s_keys[NUM_WARPS*SMEM_SIZE]; #if __CUDA_ARCH__ < __CUDA_ARCH_THRESHOLD__ // Shared memory to acquire work. __shared__ volatile IndexT s_offsets[NUM_WARPS]; // Shared memory to vote. __shared__ volatile IndexT s_bcast_cols[CTA_SIZE]; #endif // The coordinates of the thread inside the CTA/warp. const IndexT warp_id = utils::warp_id(); const IndexT lane_id = utils::lane_id(); printf("###### milestone 1\n"); // Constants. const IndexT lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const IndexT lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... IndexT r_row_id = blockIdx.x*NUM_WARPS + warp_id; // Create local storage for the set. #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ Hash_set<IndexT, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id*SMEM_SIZE], &g_keys[r_row_id*gmem_size], gmem_size ); #else Hash_set<IndexT, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id*SMEM_SIZE], &g_keys[r_row_id*gmem_size], gmem_size ); #endif printf("###### milestone 2\n"); // Loop over rows of R. // #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ for( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) // #else // for( ; r_row_id < R_num_rows ; r_row_id = get_work( s_offsets, wk_work_queue, warp_id ) ) // #endif { // Make sure we have to proceed. if( COUNT_ONLY ) { volatile IndexT *status = reinterpret_cast<volatile IndexT*>( wk_status ); if( set.has_failed() || *status != 0 ) return; } // Clear the set. set.clear(); // Load the range of the row. IndexT r_col_it = R_rows[r_row_id + 0]; IndexT r_col_end = R_rows[r_row_id + 1]; printf("###### milestone 3\n"); // Iterate over the columns of R. for( r_col_it += lane_id ; __any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end; // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. IndexT a_row_id = -1; if( is_active ) a_row_id = R_cols[r_col_it]; #if __CUDA_ARCH__ < __CUDA_ARCH_THRESHOLD__ s_bcast_cols[threadIdx.x] = a_row_id; #endif const size_t num_rows = __popc( __ballot(is_active) ); printf("###### milestone 4\n"); // Uniform loop: threads collaborate to load other elements. for( IndexT k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { IndexT local_k = k+lane_id_div_num_threads; // Is it an active thread. 
bool is_active_k = local_k < num_rows; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). #if __CUDA_ARCH__ >= __CUDA_ARCH_THRESHOLD__ const IndexT uniform_a_row_id = __shfl( a_row_id, local_k ); #else IndexT uniform_a_row_id = -1; if( is_active_k ) uniform_a_row_id = s_bcast_cols[warp_id*WARP_SIZE + local_k]; #endif printf("###### milestone 5\n"); // Load the range of the row of B. IndexT a_col_it = 0, a_col_end = 0; if( is_active_k ) { a_col_it = A_rows[uniform_a_row_id + 0]; a_col_end = A_rows[uniform_a_row_id + 1]; } // Iterate over the range of columns of B. for( a_col_it += lane_id_mod_num_threads ; __any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW ) { IndexT a_col_id = -1, a_agg_id = -1; if( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; a_agg_id = aggregates[a_col_id]; } //if( a_agg_id >= R_num_rows ) // printf( "Out of range aggregate!!!\n" ); if( HAS_DIAG && a_agg_id == r_row_id ) a_agg_id = -1; set.insert( a_agg_id, COUNT_ONLY ? wk_status : NULL ); } } } printf("###### milestone 6\n"); // Store the results. if( COUNT_ONLY ) { IndexT count = set.compute_size_with_duplicates(); if( lane_id == 0 ) Ac_rows[r_row_id] = count; } else { IndexT ac_col_it = Ac_rows[r_row_id]; set.store_with_positions( &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] ); } } } template< size_t CTA_SIZE, bool HAS_DIAG, bool COUNT_ONLY, typename Workspace, typename IndexT> void compute_sparsity_dispatch( Workspace &hash_wk, const size_t R_num_rows, const IndexT *R_rows, const IndexT *R_cols, const IndexT *A_rows, const IndexT *A_cols, const IndexT *aggregates, IndexT *Ac_rows, IndexT *Ac_cols, IndexT *Ac_pos ) { const size_t NUM_WARPS = CTA_SIZE / WARP_SIZE; //AMGX uses pool allocator thrust::global_thread_handle::cudaMallocHost(), here... // SHARED_PREFIX::shared_ptr<IndexT> h_status(new IndexT); SHARED_PREFIX::shared_ptr<IndexT> h_work_offset(new IndexT); cudaStream_t stream = 0; // for now... int attempt = 0; for( bool done = false ; !done && attempt < 10 ; ++attempt ) { // Double the amount of GMEM (if needed). if( attempt > 0 ) { std::cerr << "LOW_DEG: Requires " << hash_wk.get_gmem_size() << " items per warp!!!" << std::endl; hash_wk.expand(); } // Reset the status. IndexT *p_status = h_status.get(); *p_status = 0; cudaMemcpyAsync( hash_wk.get_status(), p_status, sizeof(IndexT), cudaMemcpyHostToDevice, stream ); cudaCheckError(); // Reset the work queue. IndexT *p_work_offset = h_work_offset.get(); *p_work_offset = GRID_SIZE*NUM_WARPS; cudaMemcpyAsync( hash_wk.get_work_queue(), p_work_offset, sizeof(IndexT), cudaMemcpyHostToDevice, stream ); cudaCheckError(); // Launch the kernel. compute_sparsity_kernel<8, CTA_SIZE, SMEM_SIZE, WARP_SIZE, HAS_DIAG, COUNT_ONLY><<<GRID_SIZE, CTA_SIZE,0,stream>>>(R_num_rows, R_rows, R_cols, A_rows, A_cols, aggregates, Ac_rows, Ac_cols, Ac_pos, hash_wk.get_gmem_size(), hash_wk.get_keys(), hash_wk.get_work_queue(), hash_wk.get_status() ); cudaCheckError(); // Read the result from count_non_zeroes. cudaMemcpyAsync( p_status, hash_wk.get_status(), sizeof(IndexT), cudaMemcpyDeviceToHost, stream ); cudaStreamSynchronize(stream); done = (*p_status == 0); cudaCheckError(); } } }//end unnamed namespace }//nvgraph namespace #endif
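Illustration (not part of the library): the Hash_set and Hash_map classes above probe each key through up to NUM_HASH_FCTS candidate slots derived from the c_hash_keys constants, claim a slot when its stored key is -1, and fall back to the larger global-memory table (addressed with bfe over bfind(m_gmem_size) bits) only when every shared-memory probe is occupied. The host-side sketch below mirrors that probe sequence for a single thread using the same hash formula, ((key ^ c_hash_keys[i]) + c_hash_keys[NUM_HASH_FCTS + i]) & (SMEM_SIZE - 1); the table size, key values and function names here are made up for the example.

// Host-side sketch of the probing scheme used by Hash_set/Hash_map above.
// Only the hash formula and the first 8 c_hash_keys values are taken from the code.
#include <cstdio>
#include <vector>

static const unsigned h_hash_keys[] = {
    3499211612u,  581869302u, 3890346734u, 3586334585u,
     545404204u, 4161255391u, 3922919429u,  949333985u };

enum { SMEM_SIZE = 128, NUM_HASH_FCTS = 4 };

// Insert 'key' by probing up to NUM_HASH_FCTS slots; returns false if all probes are taken.
bool insert(std::vector<int>& table, int key)
{
    for (int i_hash = 0; i_hash < NUM_HASH_FCTS; ++i_hash)
    {
        unsigned ukey = static_cast<unsigned>(key);
        int hash = ((ukey ^ h_hash_keys[i_hash]) + h_hash_keys[NUM_HASH_FCTS + i_hash]) & (SMEM_SIZE - 1);
        if (table[hash] == key) return true;                        // already present
        if (table[hash] == -1) { table[hash] = key; return true; }  // empty slot claimed
    }
    return false;  // would spill to the (larger) global-memory table in the device code
}

int main()
{
    std::vector<int> table(SMEM_SIZE, -1);   // -1 marks an empty slot, as in the kernels
    for (int key : {7, 42, 7, 1000003})
        std::printf("insert(%d) -> %d\n", key, insert(table, key) ? 1 : 0);
    return 0;
}

The kernels use these structures in a two-pass fashion: compute_sparsity_kernel with COUNT_ONLY=true writes per-row non-zero counts into Ac_rows, those counts are presumably converted to row offsets by the caller (e.g. with an exclusive scan), a second pass stores column indices and positions via store_with_positions, and fill_A_kernel_1x1 finally accumulates the values through the Hash_map.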
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/kmeans.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "nvgraph_error.hxx"

namespace nvgraph {

  /// Find clusters with k-means algorithm
  /** Initial centroids are chosen with k-means++ algorithm. Empty
   *  clusters are reinitialized by choosing new centroids with
   *  k-means++ algorithm.
   *
   *  CNMEM must be initialized before calling this function.
   *
   *  @param n Number of observation vectors.
   *  @param d Dimension of observation vectors.
   *  @param k Number of clusters.
   *  @param tol Tolerance for convergence. k-means stops when the
   *    change in residual divided by n is less than tol.
   *  @param maxiter Maximum number of k-means iterations.
   *  @param obs (Input, device memory, d*n entries) Observation
   *    matrix. Matrix is stored column-major and each column is an
   *    observation vector. Matrix dimensions are d x n.
   *  @param codes (Output, device memory, n entries) Cluster
   *    assignments.
   *  @param residual On exit, residual sum of squares (sum of squares
   *    of distances between observation vectors and centroids).
   *  @param iters On exit, number of k-means iterations.
   *  @return NVGRAPH error flag.
   */
  template <typename IndexType_, typename ValueType_>
  NVGRAPH_ERROR kmeans(IndexType_ n, IndexType_ d, IndexType_ k,
                       ValueType_ tol, IndexType_ maxiter,
                       const ValueType_ * __restrict__ obs,
                       IndexType_ * __restrict__ codes,
                       ValueType_ & residual,
                       IndexType_ & iters);

  /// Find clusters with k-means algorithm
  /** Initial centroids are chosen with k-means++ algorithm. Empty
   *  clusters are reinitialized by choosing new centroids with
   *  k-means++ algorithm.
   *
   *  @param n Number of observation vectors.
   *  @param d Dimension of observation vectors.
   *  @param k Number of clusters.
   *  @param tol Tolerance for convergence. k-means stops when the
   *    change in residual divided by n is less than tol.
   *  @param maxiter Maximum number of k-means iterations.
   *  @param obs (Input, device memory, d*n entries) Observation
   *    matrix. Matrix is stored column-major and each column is an
   *    observation vector. Matrix dimensions are d x n.
   *  @param codes (Output, device memory, n entries) Cluster
   *    assignments.
   *  @param clusterSizes (Output, device memory, k entries) Number of
   *    points in each cluster.
   *  @param centroids (Output, device memory, d*k entries) Centroid
   *    matrix. Matrix is stored column-major and each column is a
   *    centroid. Matrix dimensions are d x k.
   *  @param work (Output, device memory, n*max(k,d) entries)
   *    Workspace.
   *  @param work_int (Output, device memory, 2*d*n entries)
   *    Workspace.
   *  @param residual_host (Output, host memory, 1 entry) Residual sum
   *    of squares (sum of squares of distances between observation
   *    vectors and centroids).
   *  @param iters_host (Output, host memory, 1 entry) Number of
   *    k-means iterations.
   *  @return NVGRAPH error flag.
   */
  template <typename IndexType_, typename ValueType_>
  NVGRAPH_ERROR kmeans(IndexType_ n, IndexType_ d, IndexType_ k,
                       ValueType_ tol, IndexType_ maxiter,
                       const ValueType_ * __restrict__ obs,
                       IndexType_ * __restrict__ codes,
                       IndexType_ * __restrict__ clusterSizes,
                       ValueType_ * __restrict__ centroids,
                       ValueType_ * __restrict__ work,
                       IndexType_ * __restrict__ work_int,
                       ValueType_ * residual_host,
                       IndexType_ * iters_host);

}
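A minimal, hypothetical call site for the first kmeans overload declared above, assuming single-precision observations already laid out column-major (d x n) on the device. Error handling, CNMEM pool initialization (required per the comment above) and real data generation are omitted; only the calling convention is illustrated.

// Hypothetical usage sketch of nvgraph::kmeans (first overload), float data / int indices.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include "kmeans.hxx"

int main()
{
    const int n = 1000, d = 16, k = 8;                            // observations, dimension, clusters
    std::vector<float> h_obs(static_cast<size_t>(d) * n, 0.f);    // column-major, d x n (dummy data)

    float* d_obs = nullptr;  int* d_codes = nullptr;
    cudaMalloc(&d_obs,   sizeof(float) * d * n);
    cudaMalloc(&d_codes, sizeof(int) * n);
    cudaMemcpy(d_obs, h_obs.data(), sizeof(float) * d * n, cudaMemcpyHostToDevice);

    float residual = 0.f;
    int   iters    = 0;
    // NOTE: the library expects its memory pool (CNMEM) to be initialized before this call.
    auto status = nvgraph::kmeans<int, float>(n, d, k,
                                              1e-4f,   /* tol */
                                              100,     /* maxiter */
                                              d_obs, d_codes,
                                              residual, iters);
    std::printf("status=%d residual=%f iters=%d\n", static_cast<int>(status), residual, iters);

    cudaFree(d_codes);
    cudaFree(d_obs);
    return 0;
}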
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/modularity_maximization.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "nvgraph_error.hxx"
#include "valued_csr_graph.hxx"
#include "matrix.hxx"

namespace nvgraph {

  /** Compute partition for a weighted undirected graph. The partition
   *  attempts to maximize the modularity of the clustering (spectral
   *  modularity maximization followed by k-means).
   *
   *  @param G Weighted graph in CSR format.
   *  @param nClusters Number of partitions.
   *  @param nEigVecs Number of eigenvectors to compute.
   *  @param maxIter_lanczos Maximum number of Lanczos iterations.
   *  @param restartIter_lanczos Maximum size of Lanczos system before
   *    implicit restart.
   *  @param tol_lanczos Convergence tolerance for Lanczos method.
   *  @param maxIter_kmeans Maximum number of k-means iterations.
   *  @param tol_kmeans Convergence tolerance for k-means algorithm.
   *  @param clusters (Output, device memory, n entries) Cluster
   *    assignments.
   *  @param eigVals (Output) Computed eigenvalues.
   *  @param eigVecs (Output) Computed eigenvectors.
   *  @param iters_lanczos On exit, number of Lanczos iterations
   *    performed.
   *  @param iters_kmeans On exit, number of k-means iterations
   *    performed.
   *  @return NVGRAPH error flag.
   */
  template <typename IndexType_, typename ValueType_>
  NVGRAPH_ERROR modularity_maximization(ValuedCsrGraph<IndexType_, ValueType_>& G,
                                        IndexType_ nClusters,
                                        IndexType_ nEigVecs,
                                        IndexType_ maxIter_lanczos,
                                        IndexType_ restartIter_lanczos,
                                        ValueType_ tol_lanczos,
                                        IndexType_ maxIter_kmeans,
                                        ValueType_ tol_kmeans,
                                        IndexType_ * __restrict__ clusters,
                                        Vector<ValueType_> &eigVals,
                                        Vector<ValueType_> &eigVecs,
                                        IndexType_ & iters_lanczos,
                                        IndexType_ & iters_kmeans);

  /// Compute modularity
  /** This function determines the modularity based on a graph and cluster assignments.
   *  @param G Weighted graph in CSR format.
   *  @param nClusters Number of clusters.
   *  @param parts (Input, device memory, n entries) Cluster assignments.
   *  @param modularity On exit, modularity.
   */
  template <typename IndexType_, typename ValueType_>
  NVGRAPH_ERROR analyzeModularity(ValuedCsrGraph<IndexType_, ValueType_> & G,
                                  IndexType_ nClusters,
                                  const IndexType_ * __restrict__ parts,
                                  ValueType_ & modularity);

}
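A hypothetical call site for modularity_maximization followed by analyzeModularity. Building the ValuedCsrGraph and the eigenvalue/eigenvector Vector objects is library-specific and not shown; the sketch assumes they already exist, and all parameter values (cluster count, tolerances, iteration limits) are made-up examples.

// Hypothetical usage sketch; G, eigVals, eigVecs and d_clusters are assumed to be prepared elsewhere.
#include <cstdio>
#include "modularity_maximization.hxx"

void cluster_graph(nvgraph::ValuedCsrGraph<int, float>& G,
                   int* d_clusters,                      // device array, one entry per vertex
                   nvgraph::Vector<float>& eigVals,      // at least nEigVecs entries
                   nvgraph::Vector<float>& eigVecs)      // at least n * nEigVecs entries
{
    const int nClusters = 7, nEigVecs = 7;
    int iters_lanczos = 0, iters_kmeans = 0;

    auto status = nvgraph::modularity_maximization<int, float>(
        G, nClusters, nEigVecs,
        4000,    /* maxIter_lanczos */
        200,     /* restartIter_lanczos */
        1e-3f,   /* tol_lanczos */
        100,     /* maxIter_kmeans */
        1e-2f,   /* tol_kmeans */
        d_clusters, eigVals, eigVecs,
        iters_lanczos, iters_kmeans);
    (void)status;  // compare against the library's success code in real use

    float modularity = 0.f;
    nvgraph::analyzeModularity<int, float>(G, nClusters, d_clusters, modularity);

    std::printf("lanczos iters=%d, kmeans iters=%d, modularity=%f\n",
                iters_lanczos, iters_kmeans, modularity);
}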
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/sssp.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <climits> namespace nvgraph { template <typename IndexType_, typename ValueType_> class Sssp { public: typedef IndexType_ IndexType; typedef ValueType_ ValueType; private: ValuedCsrGraph <IndexType, ValueType> m_network ; Vector <ValueType> m_sssp; Vector <ValueType> m_tmp; Vector <int> m_mask; // mask[i] = 0 if we can ignore the i-th column in the csrmv IndexType m_source; ValueType m_residual; int m_iterations; bool m_is_setup; cudaStream_t m_stream; bool solve_it(); void setup(IndexType source_index, Vector<ValueType>& source_connection, Vector<ValueType>& sssp_result); public: // Simple constructor Sssp(void) {}; // Simple destructor ~Sssp(void) {}; // Create a Sssp solver attached to the transpose of a weighted network // *** network is the transposed/CSC*** Sssp(const ValuedCsrGraph <IndexType, ValueType>& network, cudaStream_t stream = 0):m_network(network),m_is_setup(false), m_stream(stream) {}; /*! Find the shortest path from the vertex source_index to every other vertex. * * \param source_index The source. * \param source_connection The connectivity of the source * if there is a link from source_index to i, source_connection[i] = E(source_index, i) * otherwise source_connection[i] = infinity * source_connection[source_index] = 0 The source_connection is computed by the caller. * \param (output) sssp_result sssp_result[i] contains the shortest path from the source to the vertex i. */ NVGRAPH_ERROR solve(IndexType source_index, Vector<ValueType>& source_connection, Vector<ValueType>& sssp_result); inline int get_iterations() const {return m_iterations;} }; } // end namespace nvgraph
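// Host-side sketch of the source_connection convention documented above. The
// function name and the use of plain CSR host arrays are assumptions, and the
// copy of the result into the Vector<ValueType> handed to solve() is left out.
#include <vector>
#include <limits>

// Fills E(source, i) where an edge (source -> i) exists, infinity elsewhere,
// and 0 at the source itself. csr_ptr/csr_ind/csr_val describe the original
// (non-transposed) graph on the host.
std::vector<float> build_source_connection(int n,
                                           const int* csr_ptr,
                                           const int* csr_ind,
                                           const float* csr_val,
                                           int source)
{
  std::vector<float> conn(n, std::numeric_limits<float>::infinity());
  for (int e = csr_ptr[source]; e < csr_ptr[source + 1]; ++e)
    conn[csr_ind[e]] = csr_val[e];    // weight of edge (source, csr_ind[e])
  conn[source] = 0.0f;                // distance from the source to itself
  return conn;
}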
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/test_opt_utils.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <string> #include <sstream> #include <iostream> #include <iomanip> #include <algorithm> #include <limits> #include <utility> #include <cstdint> extern "C" { #include "mmio.h" } #include <cuda.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include <library_types.h> #include <thrust/host_vector.h> #include <thrust/adjacent_difference.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #define CUDACHECK(cudaCall) \ do { \ cudaError_t e = (cudaCall); \ if(e != cudaSuccess) { \ fprintf(stderr, "CUDA Error (%s:%d): %s\n", \ __FILE__, __LINE__, cudaGetErrorString(e)); \ } \ } while(0) std::string getFileName(const std::string& s) { char sep = '/'; #ifdef _WIN32 sep = '\\'; #endif size_t i = s.rfind(sep, s.length()); if (i != std::string::npos) { return(s.substr(i+1, s.length() - i)); } return(""); } template <typename T> void verbose_diff(std::vector<T> & v1, std::vector<T> & v2) { for (unsigned int i = 0; i < v1.size(); ++i) { if (v1[i] != v2[i]) { std::cout << "[" << i <<"] : " << v1[i] << " -- ref = "<< v2[i]<<std::endl; } } } template <typename T> int eq(std::vector<T> & v1, std::vector<T> & v2) { if (v1 == v2) return 0; else { verbose_diff(v1,v2); return 1; } } template <typename T> void printv(size_t n, T* vec, int offset) { thrust::device_ptr<T> dev_ptr(vec); std::cout.precision(15); std::cout << "sample size = "<< n << ", offset = "<< offset << std::endl; thrust::copy(dev_ptr+offset,dev_ptr+offset+n, std::ostream_iterator<T>(std::cout, " ")); std::cout << std::endl; } template <typename T_ELEM> void ref_csr2csc (int m, int n, int nnz, const T_ELEM *csrVals, const int *csrRowptr, const int *csrColInd, T_ELEM *cscVals, int *cscRowind, int *cscColptr, int base=0){ int i,j, row, col, index; int * counters; T_ELEM val; /* early return */ if ((m <= 0) || (n <= 0) || (nnz <= 0)){ return; } /* build compressed column pointers */ memset(cscColptr, 0, (n+1)*sizeof(cscColptr[0])); cscColptr[0]=base; for (i=0; i<nnz; i++){ cscColptr[1+csrColInd[i]-base]++; } for(i=0; i<n; i++){ cscColptr[i+1]+=cscColptr[i]; } /* expand row indecis and copy them and values into csc arrays according to permutation */ counters = (int *)malloc(n*sizeof(counters[0])); memset(counters, 0, n*sizeof(counters[0])); for (i=0; i<m; i++){ for (j=csrRowptr[i]; j<csrRowptr[i+1]; j++){ row = i+base; col = csrColInd[j-base]; index=cscColptr[col-base]-base+counters[col-base]; counters[col-base]++; cscRowind[index]=row; if(csrVals!=NULL || cscVals!=NULL){ val = csrVals[j-base]; cscVals[index] = val; } } } free(counters); } template <typename T> int transition_matrix_cpu(int n, int e, int *csrRowPtrA, int *csrColIndA, T *weight, T* is_leaf) //omp_set_num_threads(4); //#pragma omp parallel { int j,row, row_size; //#pragma omp for for (row=0; row<n; row++) { row_size = csrRowPtrA[row+1] - 
csrRowPtrA[row]; if (row_size == 0) is_leaf[row]=1.0; else { is_leaf[row]=0.0; for (j=csrRowPtrA[row]; j<csrRowPtrA[row+1]; j++) weight[j] = 1.0/row_size; } } return 0; } /// Read matrix properties from Matrix Market file /** Matrix Market file is assumed to be a sparse matrix in coordinate * format. * * @param f File stream for Matrix Market file. * @param tg Boolean indicating whether to convert matrix to general * format (from symmetric, Hermitian, or skew symmetric format). * @param t (Output) MM_typecode with matrix properties. * @param m (Output) Number of matrix rows. * @param n (Output) Number of matrix columns. * @param nnz (Output) Number of non-zero matrix entries. * @return Zero if properties were read successfully. Otherwise * non-zero. */ template <typename IndexType_> int mm_properties(FILE * f, int tg, MM_typecode * t, IndexType_ * m, IndexType_ * n, IndexType_ * nnz) { // Read matrix properties from file int mint, nint, nnzint; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(*t) || !mm_is_coordinate(*t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&mint,&nint,&nnzint)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(*t) && !mm_is_real(*t) && !mm_is_integer(*t) && !mm_is_complex(*t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } *m = mint; *n = nint; *nnz = nnzint; // Find total number of non-zero entries if(tg && !mm_is_general(*t)) { // Non-diagonal entries should be counted twice IndexType_ nnzOld = *nnz; *nnz *= 2; // Diagonal entries should not be double-counted int i; int st; for(i=0; i<nnzOld; ++i) { // Read matrix entry IndexType_ row, col; double rval, ival; if (mm_is_pattern(*t)) st = fscanf(f, "%d %d\n", &row, &col); else if (mm_is_real(*t) || mm_is_integer(*t)) st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Check if entry is diagonal if(row == col) --(*nnz); } } return 0; } /// Read Matrix Market file and convert to COO format matrix /** Matrix Market file is assumed to be a sparse matrix in coordinate * format. * * @param f File stream for Matrix Market file. * @param tg Boolean indicating whether to convert matrix to general * format (from symmetric, Hermitian, or skew symmetric format). * @param nnz Number of non-zero matrix entries. * @param cooRowInd (Output) Row indices for COO matrix. Should have * at least nnz entries. * @param cooColInd (Output) Column indices for COO matrix. Should * have at least nnz entries. * @param cooRVal (Output) Real component of COO matrix * entries. Should have at least nnz entries. Ignored if null * pointer. * @param cooIVal (Output) Imaginary component of COO matrix * entries. Should have at least nnz entries. Ignored if null * pointer. * @return Zero if matrix was read successfully. Otherwise non-zero. 
*/ template <typename IndexType_, typename ValueType_> int mm_to_coo(FILE *f, int tg, IndexType_ nnz, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal , ValueType_ * cooIVal) { // Read matrix properties from file MM_typecode t; int m, n, nnzOld; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,&t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(t) || !mm_is_coordinate(t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&m,&n,&nnzOld)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(t) && !mm_is_real(t) && !mm_is_integer(t) && !mm_is_complex(t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } // Add each matrix entry in file to COO format matrix IndexType_ i; // Entry index in Matrix Market file IndexType_ j = 0; // Entry index in COO format matrix for(i=0;i<nnzOld;++i) { // Read entry from file int row, col; double rval, ival; int st; if (mm_is_pattern(t)) { st = fscanf(f, "%d %d\n", &row, &col); rval = 1.0; ival = 0.0; } else if (mm_is_real(t) || mm_is_integer(t)) { st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); ival = 0.0; } else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Switch to 0-based indexing --row; --col; // Record entry cooRowInd[j] = row; cooColInd[j] = col; if(cooRVal != NULL) cooRVal[j] = rval; if(cooIVal != NULL) cooIVal[j] = ival; ++j; // Add symmetric complement of non-diagonal entries if(tg && !mm_is_general(t) && (row!=col)) { // Modify entry value if matrix is skew symmetric or Hermitian if(mm_is_skew(t)) { rval = -rval; ival = -ival; } else if(mm_is_hermitian(t)) { ival = -ival; } // Record entry cooRowInd[j] = col; cooColInd[j] = row; if(cooRVal != NULL) cooRVal[j] = rval; if(cooIVal != NULL) cooIVal[j] = ival; ++j; } } return 0; } /// Compare two tuples based on the element indexed by i class lesser_tuple { const int i; public: lesser_tuple(int _i) : i(_i) {} template<typename Tuple1, typename Tuple2> __host__ __device__ bool operator()(const Tuple1 t1, const Tuple2 t2) { switch(i) { case 0: return (thrust::get<0>(t1) < thrust::get<0>(t2)); case 1: return (thrust::get<1>(t1) < thrust::get<1>(t2)); default: return (thrust::get<0>(t1) < thrust::get<0>(t2)); } } }; /// Sort entries in COO format matrix /** Sort is stable. * * @param nnz Number of non-zero matrix entries. * @param sort_by_row Boolean indicating whether matrix entries * will be sorted by row index or by column index. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component for COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component COO matrix entries. Ignored if * null pointer. 
*/ template <typename IndexType_, typename ValueType_> void coo_sort(IndexType_ nnz, int sort_by_row, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal, ValueType_ * cooIVal) { // Determine whether to sort by row or by column int i; if(sort_by_row == 0) i = 1; else i = 0; // Apply stable sort using namespace thrust; if((cooRVal==NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz)), lesser_tuple(i)); else if((cooRVal==NULL) && (cooIVal!=NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooIVal+nnz)), lesser_tuple(i)); else if((cooRVal!=NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooRVal+nnz)), lesser_tuple(i)); else stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz, cooRVal+nnz,cooIVal+nnz)), lesser_tuple(i)); } /// Compress sorted list of indices /** For use in converting COO format matrix to CSR or CSC format. * * @param n Maximum index. * @param nnz Number of non-zero matrix entries. * @param sortedIndices Sorted list of indices (COO format). * @param compressedIndices (Output) Compressed list of indices (CSR * or CSC format). Should have at least n+1 entries. */ template <typename IndexType_> void coo_compress(IndexType_ m, IndexType_ n, IndexType_ nnz, const IndexType_ * __restrict__ sortedIndices, IndexType_ * __restrict__ compressedIndices) { IndexType_ i; // Initialize everything to zero memset(compressedIndices, 0, (m+1)*sizeof(IndexType_)); // Count number of elements per row for(i=0; i<nnz; ++i) ++(compressedIndices[sortedIndices[i]+1]); // Compute cumulative sum to obtain row offsets/pointers for(i=0; i<m; ++i) compressedIndices[i+1] += compressedIndices[i]; } /// Convert COO format matrix to CSR format /** On output, matrix entries in COO format matrix will be sorted * (primarily by row index, secondarily by column index). * * @param m Number of matrix rows. * @param n Number of matrix columns. * @param nnz Number of non-zero matrix entries. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component of COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component of COO matrix entries. Ignored * if null pointer. * @param csrRowPtr Row pointers for CSR matrix. Should have at least * n+1 entries. * @param csrColInd Column indices for CSR matrix (identical to * output of cooColInd). Should have at least nnz entries. Ignored if * null pointer. * @param csrRVal Real component of CSR matrix entries (identical to * output of cooRVal). Should have at least nnz entries. Ignored if * null pointer. * @param csrIVal Imaginary component of CSR matrix entries * (identical to output of cooIVal). Should have at least nnz * entries. Ignored if null pointer. * @return Zero if matrix was converted successfully. Otherwise * non-zero. 
*/ template <typename IndexType_, typename ValueType_> int coo_to_csr(IndexType_ m, IndexType_ n, IndexType_ nnz, IndexType_ * __restrict__ cooRowInd, IndexType_ * __restrict__ cooColInd, ValueType_ * __restrict__ cooRVal, ValueType_ * __restrict__ cooIVal, IndexType_ * __restrict__ csrRowPtr, IndexType_ * __restrict__ csrColInd, ValueType_ * __restrict__ csrRVal, ValueType_ * __restrict__ csrIVal) { // Convert COO to CSR matrix coo_sort(nnz, 0, cooRowInd, cooColInd, cooRVal, cooIVal); coo_sort(nnz, 1, cooRowInd, cooColInd, cooRVal, cooIVal); coo_compress(m, n, nnz, cooRowInd, csrRowPtr); // Copy arrays if(csrColInd!=NULL) memcpy(csrColInd, cooColInd, nnz*sizeof(IndexType_)); if((cooRVal!=NULL) && (csrRVal!=NULL)) memcpy(csrRVal, cooRVal, nnz*sizeof(ValueType_)); if((cooIVal!=NULL) && (csrIVal!=NULL)) memcpy(csrIVal, cooIVal, nnz*sizeof(ValueType_)); return 0; }
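// Hedged end-to-end sketch of the helpers above (mm_properties, mm_to_coo,
// coo_to_csr): the wrapper name and the float value type are arbitrary, and it
// assumes this header is already included so the templates are visible.
#include <cstdio>
#include <vector>

// Reads a coordinate-format .mtx file, expands symmetric/Hermitian storage to
// general form (tg = 1), and converts the result to CSR on the host.
int load_mtx_as_csr(const char* path,
                    std::vector<int>& csrRowPtr,
                    std::vector<int>& csrColInd,
                    std::vector<float>& csrVal,
                    int& m, int& n, int& nnz)
{
  FILE* f = fopen(path, "r");
  if (!f) return -1;

  MM_typecode t;
  if (mm_properties<int>(f, 1, &t, &m, &n, &nnz)) { fclose(f); return -1; }

  std::vector<int> cooRowInd(nnz), cooColInd(nnz);
  std::vector<float> cooVal(nnz);
  if (mm_to_coo<int, float>(f, 1, nnz, cooRowInd.data(), cooColInd.data(),
                            cooVal.data(), NULL)) { fclose(f); return -1; }
  fclose(f);

  csrRowPtr.resize(m + 1); csrColInd.resize(nnz); csrVal.resize(nnz);
  return coo_to_csr<int, float>(m, n, nnz, cooRowInd.data(), cooColInd.data(),
                                cooVal.data(), NULL, csrRowPtr.data(),
                                csrColInd.data(), csrVal.data(), NULL);
}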
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/sm_utils.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #ifdef _MSC_VER #include <stdint.h> #else #include <inttypes.h> #endif #define DEFAULT_MASK 0xffffffff #define USE_CG 1 //(__CUDACC_VER__ >= 80500) namespace nvgraph { namespace utils { static __device__ __forceinline__ int lane_id() { int id; asm ( "mov.u32 %0, %%laneid;" : "=r"(id) ); return id; } static __device__ __forceinline__ int lane_mask_lt() { int mask; asm ( "mov.u32 %0, %%lanemask_lt;" : "=r"(mask) ); return mask; } static __device__ __forceinline__ int lane_mask_le() { int mask; asm ( "mov.u32 %0, %%lanemask_le;" : "=r"(mask) ); return mask; } static __device__ __forceinline__ int warp_id() { return threadIdx.x >> 5; } static __device__ __forceinline__ unsigned int ballot(int p, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #if USE_CG return __ballot_sync(mask, p); #else return __ballot(p); #endif #else return 0; #endif } static __device__ __forceinline__ int shfl(int r, int lane, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #if USE_CG return __shfl_sync(mask, r, lane, bound ); #else return __shfl(r, lane, bound ); #endif #else return 0; #endif } static __device__ __forceinline__ float shfl(float r, int lane, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #if USE_CG return __shfl_sync(mask, r, lane, bound ); #else return __shfl(r, lane, bound ); #endif #else return 0.0f; #endif } /// Warp shuffle down function /** Warp shuffle functions on 64-bit floating point values are not * natively implemented as of Compute Capability 5.0. This * implementation has been copied from * (http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler). * Once this is natively implemented, this function can be replaced * by __shfl_down. 
* */ static __device__ __forceinline__ double shfl(double r, int lane, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_sync(mask, a.x, lane, bound); a.y = __shfl_sync(mask, a.y, lane, bound); return *reinterpret_cast<double*>(&a); #else int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl(a.x, lane, bound); a.y = __shfl(a.y, lane, bound); return *reinterpret_cast<double*>(&a); #endif #else return 0.0; #endif } static __device__ __forceinline__ long long shfl(long long r, int lane, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_sync(mask, a.x, lane, bound); a.y = __shfl_sync(mask, a.y, lane, bound); return *reinterpret_cast<long long*>(&a); #else int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl(a.x, lane, bound); a.y = __shfl(a.y, lane, bound); return *reinterpret_cast<long long*>(&a); #endif #else return 0.0; #endif } static __device__ __forceinline__ int shfl_down(int r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG return __shfl_down_sync( mask, r, offset, bound ); #else return __shfl_down( r, offset, bound ); #endif #else return 0.0f; #endif } static __device__ __forceinline__ float shfl_down(float r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG return __shfl_down_sync( mask, r, offset, bound ); #else return __shfl_down( r, offset, bound ); #endif #else return 0.0f; #endif } static __device__ __forceinline__ double shfl_down(double r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_down_sync(mask, a.x, offset, bound); a.y = __shfl_down_sync(mask, a.y, offset, bound); return *reinterpret_cast<double*>(&a); #else int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_down(a.x, offset, bound); a.y = __shfl_down(a.y, offset, bound); return *reinterpret_cast<double*>(&a); #endif #else return 0.0; #endif } static __device__ __forceinline__ long long shfl_down(long long r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_down_sync(mask, a.x, offset, bound); a.y = __shfl_down_sync(mask, a.y, offset, bound); return *reinterpret_cast<long long*>(&a); #else int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_down(a.x, offset, bound); a.y = __shfl_down(a.y, offset, bound); return *reinterpret_cast<long long*>(&a); #endif #else return 0.0; #endif } // specifically for triangles counting static __device__ __forceinline__ uint64_t shfl_down(uint64_t r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_down_sync(mask, a.x, offset, bound); a.y = __shfl_down_sync(mask, a.y, offset, bound); return *reinterpret_cast<uint64_t*>(&a); #else int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_down(mask, a.x, offset, bound); a.y = __shfl_down(mask, a.y, offset, bound); return *reinterpret_cast<uint64_t*>(&a); #endif #else return 0.0; #endif } static __device__ __forceinline__ int shfl_up(int r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG return __shfl_up_sync( mask, r, offset, bound ); #else return __shfl_up( r, offset, bound ); #endif #else return 0.0f; #endif } static __device__ __forceinline__ float 
shfl_up(float r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG return __shfl_up_sync( mask, r, offset, bound ); #else return __shfl_up( r, offset, bound ); #endif #else return 0.0f; #endif } static __device__ __forceinline__ double shfl_up(double r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_up_sync(mask, a.x, offset, bound); a.y = __shfl_up_sync(mask, a.y, offset, bound); return *reinterpret_cast<double*>(&a); #else int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_up(a.x, offset, bound); a.y = __shfl_up(a.y, offset, bound); return *reinterpret_cast<double*>(&a); #endif #else return 0.0; #endif } static __device__ __forceinline__ long long shfl_up(long long r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #ifdef USE_CG int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_up_sync(mask, a.x, offset, bound); a.y = __shfl_up_sync(mask, a.y, offset, bound); return *reinterpret_cast<long long*>(&a); #else int2 a = *reinterpret_cast<int2*>(&r); a.x = __shfl_up(a.x, offset, bound); a.y = __shfl_up(a.y, offset, bound); return *reinterpret_cast<long long*>(&a); #endif #else return 0.0; #endif } } }
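// Minimal sketch of how the shuffle wrappers above are usually composed: a
// warp-wide sum via shfl_down. The helper name is made up, and it assumes the
// calling warp is full and converged so that the default mask is valid.
__device__ __forceinline__ double warp_reduce_sum(double v)
{
  // Shuffle-down tree reduction: after log2(32) = 5 steps, lane 0 holds the
  // sum of the values contributed by all 32 lanes.
  for (int offset = 16; offset > 0; offset >>= 1)
    v += nvgraph::utils::shfl_down(v, offset);
  return v;
}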
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/app/nvlouvain_sample.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> // Turn on to see stats for each level //#define ENABLE_LOG true #include "nvlouvain.cuh" /* Louvain Clustering Sample Social network example: Zachary Karate Club W. Zachary, “An information flow model for conflict and fission in small groups,” Journal of Anthropological Research, vol. 33, pp. 452–473, 1977 https://en.wikipedia.org/wiki/Zachary's_karate_club -------------------------------------------------------------------- V = 34 E = 78 bidirectional, 156 directed edges Bidirectional edges list: [2 1] [3 1] [3 2] [4 1] [4 2] [4 3] [5 1] [6 1] [7 1] [7 5] [7 6] [8 1] [8 2] [8 3] [8 4] [9 1] [9 3] [10 3] [11 1] [11 5] [11 6] [12 1] [13 1] [13 4] [14 1] [14 2] [14 3] [14 4] [17 6] [17 7] [18 1] [18 2] [20 1] [20 2] [22 1] [22 2] [26 24] [26 25] [28 3] [28 24] [28 25] [29 3] [30 24] [30 27] [31 2] [31 9] [32 1] [32 25] [32 26] [32 29] [33 3] [33 9] [33 15] [33 16] [33 19] [33 21] [33 23] [33 24] [33 30] [33 31] [33 32] [34 9] [34 10] [34 14] [34 15] [34 16] [34 19] [34 20] [34 21] [34 23] [34 24] [34 27] [34 28] [34 29] [34 30] [34 31] [34 32] [34 33] CSR representation (directed): csrRowPtrA_h {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156} csrColIndA_h {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32} csrValA_h {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0} -------------------------------------------------------------------- Operation: Louvain Clustering default parameters in modularity maximization -------------------------------------------------------------------- Expected output: This sample prints the modlarity score 
and compare against the python reference (https://python-louvain.readthedocs.io/en/latest/api.html) */ using namespace nvlouvain; void check_status(nvlouvainStatus_t status) { if ((int)status != 0) { printf("ERROR : %s\n",nvlouvainStatusGetString(status)); exit(0); } } int main(int argc, char **argv) { // Hard-coded Zachary Karate Club network input int csrRowPtrA_input [] = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; int csrColIndA_input [] = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; float csrValA_input [] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; int ref_clustering [] = {0, 0, 0, 0, 1, 1, 1, 0, 2, 0, 1, 0, 0, 0, 2, 2, 1, 0, 2, 0, 2, 0, 2, 3, 3, 3, 2, 3, 3, 2, 2, 3, 2, 2}; int *csrRowPtrA_h = &csrRowPtrA_input[0]; int *csrColIndA_h = &csrColIndA_input[0]; float *csrValA_h = &csrValA_input[0]; // Variables const size_t n = 34, nnz = 156; bool weighted = false; bool has_init_cluster = false; int *clustering_h, *init_cluster_ptr = nullptr;; int num_levels = 0, hits =0; float final_modulartiy = 0; // Allocate host data for nvgraphSpectralClustering output clustering_h = (int*)malloc(n*sizeof(int)); //Solve clustering with modularity maximization algorithm check_status(louvain<int,float>(csrRowPtrA_h, csrColIndA_h, csrValA_h, n, nnz, weighted, has_init_cluster, init_cluster_ptr, final_modulartiy, clustering_h, num_levels)); //Print quality (modualrity) printf("Modularity_score: %f\n", final_modulartiy); printf("num levels: %d\n", num_levels); for (int i = 0; i < (int)n; i++) if (clustering_h[i] == ref_clustering[i]) hits++; printf("Hit rate : %f%% (%d hits)\n", (hits*100.0)/n, hits); // Print the clustering vector in csv format //for (int i = 0; i < (int)(n-1); i++) // printf("%d,",clustering_h[i]); //printf("%d,\n",clustering_h[n-1]); free(clustering_h); printf("Done!\n"); return EXIT_SUCCESS; }
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/app/nvlouvain_app_hierarchy.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <string> #include <cstring> #include <vector> #include <cmath> #include "test_opt_utils.cuh" #include "graph_utils.cuh" //#define ENABLE_LOG true #define ENALBE_LOUVAIN true #include "nvlouvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <cuda.h> #include <cuda_profiler_api.h> using T = double; int main(int argc, char* argv[]){ if(argc < 2) { std::cout<< "Help : ./louvain_test matrix_market_file.mtx"<<std::endl; return 1; } FILE* fin = std::fopen( argv[1] ,"r"); int m, k, nnz; MM_typecode mc; CUDA_CALL(cudaSetDevice(0)); EXPECT_EQ((mm_properties<int>(fin, 1, &mc, &m, &k, &nnz)) ,0); EXPECT_EQ(m,k); thrust::host_vector<int> coo_ind_h(nnz); thrust::host_vector<int> csr_ptr_h(m+1); thrust::host_vector<int> csr_ind_h(nnz); thrust::host_vector<T> csr_val_h(nnz); EXPECT_EQ( (mm_to_coo<int,T>(fin, 1, nnz, &coo_ind_h[0], &csr_ind_h[0], &csr_val_h[0], NULL)), 0); EXPECT_EQ( (coo_to_csr<int,T> (m, k, nnz, &coo_ind_h[0], &csr_ind_h[0], &csr_val_h[0], NULL, &csr_ptr_h[0], NULL, NULL, NULL)), 0); EXPECT_EQ(fclose(fin),0); thrust::device_vector<int> csr_ptr_d(csr_ptr_h); thrust::device_vector<int> csr_ind_d(csr_ind_h); thrust::device_vector<T> csr_val_d(csr_val_h); thrust::device_vector<T> tmp_1(nnz); thrust::fill(thrust::cuda::par, tmp_1.begin(), tmp_1.end(), 1.0); thrust::device_vector<T>::iterator max_ele = thrust::max_element(thrust::cuda::par, csr_val_d.begin(), csr_val_d.end()); bool weighted = (*max_ele!=1.0); //std::cout<<(weighted?"Weighted ":"Not Weigthed ")<<" n_vertex: "<<m<<"\n"; HighResClock hr_clock; double louvain_time; if(ENALBE_LOUVAIN){ T final_modulartiy(0); //bool record = true; bool has_init_cluster = false; thrust::device_vector<int> cluster_d(m, 0); std::vector< std::vector<int> > best_cluster_vec; int* csr_ptr_ptr = thrust::raw_pointer_cast(csr_ptr_d.data()); int* csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data()); T* csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data()); int* init_cluster_ptr = thrust::raw_pointer_cast(cluster_d.data()); int num_level; cudaProfilerStart(); hr_clock.start(); nvlouvain::louvain<int,T>(csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, m, nnz, weighted, has_init_cluster, init_cluster_ptr, final_modulartiy, best_cluster_vec, num_level); hr_clock.stop(&louvain_time); cudaProfilerStop(); std::cout<<"Final modularity: "<<COLOR_MGT<<final_modulartiy<<COLOR_WHT<<" num_level: "<<num_level<<std::endl; std::cout<<"louvain total runtime:"<<louvain_time/1000<<" ms\n"; //for (size_t i = 0; i < best_cluster_vec.size(); i++) //{ // for(std::vector<int>::iterator it = best_cluster_vec[i].begin(); it != best_cluster_vec[i].end(); ++it) // std::cout << *it <<' '; // std::cout << std::endl; //} } return 0; }
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/app/nvlouvain_sample_hierarchy.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> // Turn on to see stats for each level //#define ENABLE_LOG true #include "nvlouvain.cuh" /* Louvain Clustering Sample Social network example: Zachary Karate Club W. Zachary, “An information flow model for conflict and fission in small groups,” Journal of Anthropological Research, vol. 33, pp. 452–473, 1977 https://en.wikipedia.org/wiki/Zachary's_karate_club -------------------------------------------------------------------- V = 34 E = 78 bidirectional, 156 directed edges Bidirectional edges list: [2 1] [3 1] [3 2] [4 1] [4 2] [4 3] [5 1] [6 1] [7 1] [7 5] [7 6] [8 1] [8 2] [8 3] [8 4] [9 1] [9 3] [10 3] [11 1] [11 5] [11 6] [12 1] [13 1] [13 4] [14 1] [14 2] [14 3] [14 4] [17 6] [17 7] [18 1] [18 2] [20 1] [20 2] [22 1] [22 2] [26 24] [26 25] [28 3] [28 24] [28 25] [29 3] [30 24] [30 27] [31 2] [31 9] [32 1] [32 25] [32 26] [32 29] [33 3] [33 9] [33 15] [33 16] [33 19] [33 21] [33 23] [33 24] [33 30] [33 31] [33 32] [34 9] [34 10] [34 14] [34 15] [34 16] [34 19] [34 20] [34 21] [34 23] [34 24] [34 27] [34 28] [34 29] [34 30] [34 31] [34 32] [34 33] CSR representation (directed): csrRowPtrA_h {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156} csrColIndA_h {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32} csrValA_h {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0} -------------------------------------------------------------------- Operation: Louvain Clustering default parameters in modularity maximization -------------------------------------------------------------------- Expected output: This sample prints the modlarity score */ 
using namespace nvlouvain; void check_status(nvlouvainStatus_t status) { if ((int)status != 0) { printf("ERROR : %s\n",nvlouvainStatusGetString(status)); exit(0); } } int main(int argc, char **argv) { // Hard-coded Zachary Karate Club network input int csrRowPtrA_input [] = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; int csrColIndA_input [] = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; float csrValA_input [] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; // int ref_clustering [] = {1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; int *csrRowPtrA_h = &csrRowPtrA_input[0]; int *csrColIndA_h = &csrColIndA_input[0]; float *csrValA_h = &csrValA_input[0]; // Variables const size_t n = 34, nnz = 156; bool weighted = false; bool has_init_cluster = false; int num_levels = 0; int *init_cluster_ptr = nullptr; float final_modulartiy = 0; std::vector< std::vector<int> > best_cluster_vec; //Solve clustering with modularity maximization algorithm check_status(louvain<int,float>(csrRowPtrA_h, csrColIndA_h, csrValA_h, n, nnz, weighted, has_init_cluster, init_cluster_ptr, final_modulartiy, best_cluster_vec, num_levels)); //Print quality (modualrity) printf("Modularity_score: %f\n", final_modulartiy); printf("num levels: %d\n", num_levels); printf("Done!\n"); //for (size_t i = 0; i < best_cluster_vec.size(); i++) //{ // for(std::vector<int>::iterator it = best_cluster_vec[i].begin(); it != best_cluster_vec[i].end(); ++it) // std::cout << *it <<' '; // std::cout << std::endl; //} return EXIT_SUCCESS; }
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/app/nvlouvain_app.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <string> #include <cstring> #include <vector> #include <cmath> #include "test_opt_utils.cuh" #include "graph_utils.cuh" //#define ENABLE_LOG TRUE #define ENALBE_LOUVAIN true #include "nvlouvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <cuda.h> #include <cuda_profiler_api.h> using T = float; int main(int argc, char* argv[]){ if(argc < 2) { std::cout<< "Help : ./louvain_test matrix_market_file.mtx"<<std::endl; return 1; } FILE* fin = std::fopen( argv[1] ,"r"); int m, k, nnz; MM_typecode mc; CUDA_CALL(cudaSetDevice(0)); EXPECT_EQ((mm_properties<int>(fin, 1, &mc, &m, &k, &nnz)) ,0); EXPECT_EQ(m,k); thrust::host_vector<int> coo_ind_h(nnz); thrust::host_vector<int> csr_ptr_h(m+1); thrust::host_vector<int> csr_ind_h(nnz); thrust::host_vector<T> csr_val_h(nnz); EXPECT_EQ( (mm_to_coo<int,T>(fin, 1, nnz, &coo_ind_h[0], &csr_ind_h[0], &csr_val_h[0], NULL)), 0); EXPECT_EQ( (coo_to_csr<int,T> (m, k, nnz, &coo_ind_h[0], &csr_ind_h[0], &csr_val_h[0], NULL, &csr_ptr_h[0], NULL, NULL, NULL)), 0); EXPECT_EQ(fclose(fin),0); thrust::device_vector<int> csr_ptr_d(csr_ptr_h); thrust::device_vector<int> csr_ind_d(csr_ind_h); thrust::device_vector<T> csr_val_d(csr_val_h); thrust::device_vector<T> tmp_1(nnz); thrust::fill(thrust::cuda::par, tmp_1.begin(), tmp_1.end(), 1.0); thrust::device_vector<T>::iterator max_ele = thrust::max_element(thrust::cuda::par, csr_val_d.begin(), csr_val_d.end()); bool weighted = (*max_ele!=1.0); //std::cout<<(weighted?"Weighted ":"Not Weigthed ")<<" n_vertex: "<<m<<"\n"; HighResClock hr_clock; double louvain_time; if(ENALBE_LOUVAIN){ T final_modulartiy(0); //bool record = true; bool has_init_cluster = false; int *clustering_h = (int*)malloc(m*sizeof(int)); thrust::device_vector<int> cluster_d(m, 0); int* csr_ptr_ptr = thrust::raw_pointer_cast(csr_ptr_d.data()); int* csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data()); T* csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data()); int* init_cluster_ptr = thrust::raw_pointer_cast(cluster_d.data()); int num_level; cudaProfilerStart(); hr_clock.start(); nvlouvain::louvain<int,T>(csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, m, nnz, weighted, has_init_cluster, init_cluster_ptr, final_modulartiy, clustering_h, num_level); hr_clock.stop(&louvain_time); cudaProfilerStop(); std::cout<<"Final modularity: "<<COLOR_MGT<<final_modulartiy<<COLOR_WHT<<" num_level: "<<num_level<<std::endl; std::cout<<"louvain total runtime:"<<louvain_time/1000<<" ms\n"; } return 0; }
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/phase_1_color_test.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "back_up.cuh" template<typename IdxIter, typename ValIter, typename IdxType=int, typename ValType> __global__ void kernel_phase_1_color(const int n_vertex, IdxIter csr_ptr_iter, IdxIter csr_ind_iter, ValIter csr_val_iter, IdxIter cluster, IdxType* color, IdxType color_size,ValType *matrix, IdxType *cluster_sizes, ValType* improve, IdxType* n_moved){ *n_moved = 0; IdxType j = blockIdx.x * blockDim.x + threadIdx.x; IdxType i = blockIdx.y * blockDim.y + threadIdx.y; for( int t = 0; t < color_size; ++t ){ // color t if( i< n_vertex && color[i] == t ){ IdxType start_idx = *(csr_ptr_iter + i); IdxType end_idx = *(csr_ptr_iter + i + 1); if(j < end_idx - start_idx){ IdxType c = cluster[ csr_ind_iter[start_idx + j]]; //printf("i:%d j:%d start:%d end:%d c:%d\n",i,j,start_idx, end_idx,c); nvlouvain::phase_1( n_vertex, csr_ptr_iter, csr_ind_iter, csr_val_iter, cluster, i, j, c, matrix, cluster_sizes, improve, n_moved); } } } } /* void phase_1_color_test(thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, const int size){ HighResClock hr_clock; double timed; dim3 block_size((size + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, (size + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, 1); dim3 grid_size(BLOCK_SIZE_2D, BLOCK_SIZE_2D, 1); thrust::device_vector<int> cluster_d(size); thrust::sequence(cluster_d.begin(), cluster_d.end()); std::cout<<"old cluster: "; nvlouvain::display_vec(cluster_d); thrust::device_vector<T> Q_d(1); T* Q_d_raw_ptr = thrust::raw_pointer_cast(Q_d.data()); thrust::device_vector<T> matrix(size*size); T* matrix_raw_ptr = thrust::raw_pointer_cast(matrix.data()); hr_clock.start(); kernel_modularity<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), matrix_raw_ptr, Q_d_raw_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double mod_time(timed); std::cout<<"modularity: "<<Q_d[0]<<" runtime: "<<mod_time<<std::endl; thrust::device_vector<T> improve_d(1); T* improve_d_raw_ptr = thrust::raw_pointer_cast(improve_d.data()); thrust::device_vector<int> c_size_d(size, 1); int* c_size_d_raw_ptr = thrust::raw_pointer_cast(c_size_d.data()); thrust::device_vector<int> n_moved(1, 0); int* n_moved_ptr = thrust::raw_pointer_cast(n_moved.data()); // nvlouvain::display_vec(c_size_d); //-------------------------------- 1st - thrust::device_vector<T> Q_old(Q_d); double delta_Q; int count = 0; int num_move = 0; int color_size; std::vector<int> fill_color(size); if(size == 16){ fill_color = {0, 0, 1, 2, 2, 2, 0, 1, 2, 0, 0, 1, 1, 2, 1, 0}; color_size = 3; } else if(size == 4){ fill_color = {0, 1, 2, 0}; color_size = 3; } thrust::device_vector<int> color(fill_color); int* color_ptr = thrust::raw_pointer_cast(color.data()); do{ Q_old[0] = Q_d[0]; hr_clock.start(); kernel_phase_1_color<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), 
color_ptr, color_size, matrix_raw_ptr, c_size_d_raw_ptr, improve_d_raw_ptr, n_moved_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); mod_time = timed; std::cout<<"new cluster: "; nvlouvain::display_vec(cluster_d); std::cout<<"improvement: "<<improve_d[0]<<" runtime: "<<mod_time<<std::endl; kernel_modularity<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), matrix_raw_ptr, Q_d_raw_ptr); CUDA_CALL(cudaDeviceSynchronize()); delta_Q = Q_d[0] - Q_old[0]; std::cout<<"new modularity: "<<Q_d[0]<<" delta_Q:"<<delta_Q<<" runtime: "<<mod_time<<std::endl; std::cout<<"cluster size: "; nvlouvain::display_vec(c_size_d); int sum = thrust::reduce(thrust::cuda::par, c_size_d.begin(), c_size_d.end(), 0); num_move = n_moved[0]; std::cout<<"sum: "<< sum<<" moved: "<<num_move<<std::endl; ++count; }while( num_move > 0 ); } */
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/delta_modularity_test.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <string> #include "test_opt_utils.h" #include "graph_utils.cuh" #include "louvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include "util.cuh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> template<typename IdxIter, typename ValIter, typename ValType> __global__ void kernel_delta_modularity(const int n_vertex, IdxIter csr_ptr_iter, IdxIter csr_ind_iter, ValIter csr_val_iter, IdxIter cluster, ValType* score){ int c = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if( i<n_vertex && c < n_vertex ){ nvlouvain::delta_modularity_block( n_vertex, csr_ptr_iter, csr_ind_iter, csr_val_iter, cluster, i, c, &score[i*n_vertex +c] ); //printf("i: %d c: %d delta: %f\n", i, c, score[i*n_vertex +c] ); } } void delta_modularity_test(thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, const int size){ HighResClock hr_clock; double timed; dim3 block_size((size + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, (size + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, 1); dim3 grid_size(BLOCK_SIZE_2D, BLOCK_SIZE_2D, 1); thrust::device_vector<int> cluster_d(size); thrust::sequence(cluster_d.begin(), cluster_d.end()); std::cout<<"cluster: "; nvlouvain::display_vec(cluster_d); thrust::device_vector<T> score_d(size*size); T* score_d_raw_ptr = thrust::raw_pointer_cast(score_d.data()); hr_clock.start(); kernel_delta_modularity<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), score_d_raw_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double mod_time(timed); std::cout<<"delta modularity: "<<score_d[0]<<" runtime: "<<mod_time<<std::endl; }
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/thrust_test.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <string> #include "test_opt_utils.h" #include "graph_utils.cuh" #include "louvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> template<typename iter, typename ptr > __global__ void test_sum(iter begin, iter end, ptr sum){ thrust::plus<T> op; *sum = thrust::reduce(thrust::cuda::par, begin, end, 0.0, op); } __global__ void test_sum_cast(T* vec, size_t size, T* sum){ thrust::plus<T> op; *sum = thrust::reduce(thrust::cuda::par, vec, vec+size, 0.0, op); } void thrust_passing_arg_test( thrust::host_vector<int> &csr_ptr_h, thrust::host_vector<int> &csr_ind_h, thrust::host_vector<T> &csr_val_h, thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d){ HighResClock hr_clock; double timed; thrust::plus<T> binary_op; hr_clock.start(); T sum_h = thrust::reduce(csr_val_h.begin(), csr_val_h.end(), 0.0, binary_op); hr_clock.stop(&timed); double cpu_time(timed); thrust::copy(csr_val_d.begin(), csr_val_d.end(), std::ostream_iterator<float>(std::cout, " ")); std::cout<<std::endl; dim3 block_size(1, 1, 1); dim3 grid_size(1, 1, 1); hr_clock.start(); T sum_r = thrust::reduce(csr_val_d.begin(), csr_val_d.end(), 0.0, binary_op); hr_clock.stop(&timed); double r_time(timed); hr_clock.start(); thrust::device_vector<T> sum_d(1, 0.0); test_sum<<<block_size,grid_size>>>( csr_val_d.begin(),csr_val_d.end(), sum_d.data()); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double cuda_time(timed); hr_clock.start(); cudaStream_t s; thrust::device_vector<T> sum_a(1, 0.0); cudaStreamCreate(&s); test_sum<<<1,1,0,s>>>(csr_val_d.begin(),csr_val_d.end(), sum_a.data()); cudaStreamSynchronize(s); hr_clock.stop(&timed); double asyn_time(timed); hr_clock.start(); T* csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data()); double* raw_sum; double sum_cast; cudaMalloc((void **) &raw_sum, sizeof(double)); test_sum_cast<<<block_size,grid_size>>>( csr_val_ptr, csr_val_d.size(), raw_sum); cudaMemcpy(&sum_cast, raw_sum, sizeof(double),cudaMemcpyDeviceToHost); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double cast_time(timed); cudaFree(raw_sum); std::cout<<"cpu sum of val: "<< sum_h <<" runtime: "<<cpu_time<<std::endl; std::cout<<"device sum of val: "<< sum_r <<" runtime: "<<r_time<<std::endl; std::cout<<"kernel sum of val: "<< sum_d[0] <<" runtime: "<<cuda_time<<std::endl; std::cout<<"async sum of val: "<< sum_a[0] <<" runtime: "<<asyn_time<<std::endl; std::cout<<"cast: sum of val: "<< sum_cast <<" runtime: "<<cast_time<<std::endl; }
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/cluster_inv.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <string> #include <vector> #include "test_opt_utils.h" #include "graph_utils.cuh" #include "louvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include "util.cuh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> void cluster_inv_test(){ std::vector<int> cluster = {0, 1, 1, 2, 1, 0, 2, 2, 3, 4, 5, 6, 4, 6, 5, 3}; int n_vertex = 16; int c_size = 7; thrust::device_vector<int> cluster_d(cluster.begin(), cluster.end()); thrust::device_vector<int> cluster_inv_ptr(c_size + 1); thrust::device_vector<int> cluster_inv_ind(n_vertex); int* cluster_inv_ptr_ptr = thrust::raw_pointer_cast(cluster_inv_ptr.data()); int* cluster_inv_ind_ptr = thrust::raw_pointer_cast(cluster_inv_ind.data()); thrust::device_vector<int> seq_idx(n_vertex); thrust::sequence(seq_idx.begin(), seq_idx.end()); int* seq_idx_ptr = thrust::raw_pointer_cast(seq_idx.data()); dim3 block_size((n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size(BLOCK_SIZE_1D, 1, 1); nvlouvain::generate_cluster_inv(n_vertex, c_size, cluster_d.begin(), cluster_inv_ptr, cluster_inv_ind); #ifdef VERBOSE nvlouvain::display_vec(cluster_inv_ptr); nvlouvain::display_vec(cluster_inv_ind); #endif // nvlouvain::display_vec_size(cluster_inv_ind_ptr, n_vertex); } void cluster_sum_test(thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, const int n_vertex, bool weighted){ HighResClock hr_clock; double timed, diff_time; std::vector<int> cluster(n_vertex); int c_size; if(n_vertex == 16){ cluster = {0, 1, 1, 2, 1, 0, 2, 2, 3, 4, 5, 6, 4, 6, 5, 3}; c_size = 7; } else{ for(int i = 0 ; i <n_vertex ; ++i){ cluster[i]=i; } c_size = n_vertex; } thrust::device_vector<int> cluster_d(cluster.begin(), cluster.end()); thrust::device_vector<int> cluster_inv_ptr(c_size+1); thrust::device_vector<int> cluster_inv_ind(n_vertex); int* cluster_inv_ptr_ptr = thrust::raw_pointer_cast(cluster_inv_ptr.data()); int* cluster_inv_ind_ptr = thrust::raw_pointer_cast(cluster_inv_ind.data()); thrust::device_vector<int> seq_idx(n_vertex); thrust::sequence(seq_idx.begin(), seq_idx.end()); int* seq_idx_ptr = thrust::raw_pointer_cast(seq_idx.data()); dim3 block_size((n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size(BLOCK_SIZE_1D, 1, 1); thrust::device_vector<T> score(1); thrust::device_vector<T> k_vec(n_vertex); thrust::device_vector<T> Q_arr(n_vertex); thrust::device_vector<T> delta_Q_arr(csr_ptr_d[n_vertex]); thrust::device_vector<T> cluster_sum_vec(c_size); T* score_ptr = thrust::raw_pointer_cast(score.data()); T* k_vec_ptr = thrust::raw_pointer_cast(k_vec.data()); T* Q_arr_ptr = thrust::raw_pointer_cast(Q_arr.data()); T* cluster_sum_vec_ptr = thrust::raw_pointer_cast(cluster_sum_vec.data()); T* delta_Q_arr_ptr = thrust::raw_pointer_cast(delta_Q_arr.data()); int* csr_ptr_ptr = 
thrust::raw_pointer_cast(csr_ptr_d.data()); int* csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data()); T* csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data()); int* cluster_ptr = thrust::raw_pointer_cast(cluster_d.data()); hr_clock.start(); nvlouvain::generate_cluster_inv(n_vertex, c_size, cluster_d.begin(), cluster_inv_ptr, cluster_inv_ind); hr_clock.stop(&timed); diff_time = timed; weighted = true; #ifdef VERBOSE printf("cluster inv: \n"); nvlouvain::display_vec(cluster_inv_ptr); nvlouvain::display_vec(cluster_inv_ind); #endif std::cout<<"cluster inv rumtime: "<<diff_time<<" us\n"; T m2 = thrust::reduce(thrust::cuda::par, csr_val_d.begin(), csr_val_d.end()); hr_clock.start(); double Q = nvlouvain::modularity(n_vertex, csr_ptr_d[n_vertex],c_size, m2, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); /* nvlouvain::kernel_modularity_no_matrix<<<block_size, grid_size >>>(n_vertex, c_size, m2, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), cluster_inv_ptr.begin(), cluster_inv_ind.begin(), weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr,score_ptr); CUDA_CALL(cudaDeviceSynchronize()); double Q = score[0]; */ hr_clock.stop(&timed); diff_time = timed; #ifdef VERBOSE printf("Q_arr: \n"); nvlouvain::display_vec(Q_arr); printf("k_vec: \n"); nvlouvain::display_vec(k_vec); #endif printf("modularity(w/o block): %.10e runtime: ",Q); std::cout<<diff_time<<std::endl; //==================== int side = (n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D; dim3 block_size_2d(side,side,1); dim3 grid_size_2d(BLOCK_SIZE_1D, BLOCK_SIZE_1D, 1); hr_clock.start(); nvlouvain::build_delta_modularity_vec<<<block_size_2d, grid_size_2d>>>(n_vertex, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), delta_Q_arr_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); diff_time = timed; #ifdef VERBOSE nvlouvain::display_vec(Q_arr); #endif std::cout<<"delta (w block) rumtime: "<<diff_time<<" us\n"; //==================== /* hr_clock.start(); nvlouvain::kernel_compute_cluster_sum<<<block_size, grid_size>>>( n_vertex, c_size, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, k_vec_ptr, cluster_sum_vec_ptr); CUDA_CALL(cudaDeviceSynchronize()); #ifdef VERBOSE nvlouvain::display_vec(cluster_sum_vec); #endif nvlouvain::build_delta_modularity_vec<<<block_size_2d, grid_size_2d>>>(n_vertex, c_size, m2 csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), cluster_sum_vec_ptr, k_vec_ptr, delta_Q_arr_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); diff_time = timed; #ifdef VERBOSE nvlouvain::display_vec(Q_arr); #endif std::cout<<"delta (wo block)rumtime: "<<diff_time<<" us\n"; */ }
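For reference, nvlouvain::generate_cluster_inv in the tests above produces the inverse mapping from cluster ids to member vertices in a CSR-like layout (cluster_inv_ptr / cluster_inv_ind). Below is a minimal host-side sketch of that mapping, assuming only what the test itself sets up; it is not the nvlouvain device implementation.

#include <vector>

// Host-side sketch: build the inverse cluster mapping in CSR-like form.
// cluster[v] is the cluster id of vertex v; on return,
// inv_ind[inv_ptr[c] .. inv_ptr[c+1]) lists the vertices assigned to cluster c.
void cluster_inv_host(int n_vertex, int c_size,
                      const std::vector<int>& cluster,
                      std::vector<int>& inv_ptr,
                      std::vector<int>& inv_ind) {
  inv_ptr.assign(c_size + 1, 0);
  inv_ind.assign(n_vertex, 0);
  for (int v = 0; v < n_vertex; ++v)      // count members per cluster
    ++inv_ptr[cluster[v] + 1];
  for (int c = 0; c < c_size; ++c)        // prefix sum -> cluster offsets
    inv_ptr[c + 1] += inv_ptr[c];
  std::vector<int> cursor(inv_ptr.begin(), inv_ptr.end() - 1);
  for (int v = 0; v < n_vertex; ++v)      // scatter vertex ids into their cluster's range
    inv_ind[cursor[cluster[v]]++] = v;
}

For the 16-vertex assignment used above ({0,1,1,2,1,0,2,2,3,4,5,6,4,6,5,3}, c_size = 7) this yields inv_ptr = {0,2,5,8,10,12,14,16}, with cluster 1 containing vertices {1,2,4}.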
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/k_compute_test.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <string> #include "test_opt_utils.h" #include "graph_utils.cuh" #include "louvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include "util.cuh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> /* template< typename IdxType, typename ValType > __global__ void kernal_test(const int size, IdxType* csr_ptr, ValType* csr_val, int i, ValType* result){ int idx = blockDim.x*blockIdx.x + threadIdx.x; if(idx < size){ nvlouvain::compute_k(size, csr_ptr, csr_val, idx, &result[idx]); //printf("k%d = %f\n", idx ,result[idx]); } return; } template< typename IdxIter, typename ValIter, typename ValType > __global__ void kernal_test_iter(const int size, IdxIter csr_ptr_iter, ValIter csr_val_iter, int i, ValType* result){ int idx = blockDim.x*blockIdx.x + threadIdx.x; if(idx < size){ //printf("start compute k with iter passing. (%d, %d, %d) idx = %d %f\n", blockDim.x, blockIdx.x, threadIdx.x, idx, result[idx]); nvlouvain::compute_k(size, csr_ptr_iter, csr_val_iter, idx, &result[idx]); //printf("k%d = %f\n", idx ,result[idx]); } return; } template< typename IdxIter, typename ValIter, typename DevPtr > __global__ void kernal_test_dev_ptr(const int size, IdxIter csr_ptr_iter, ValIter csr_val_iter, int i, DevPtr result){ int idx = blockDim.x*blockIdx.x + threadIdx.x; if(idx < size){ //printf("start compute k with iter passing. 
(%d, %d, %d) idx = %d %f\n", blockDim.x, blockIdx.x, threadIdx.x, idx, result[idx]); nvlouvain::compute_k(size, csr_ptr_iter, csr_val_iter, idx, &result[idx]); //printf("k%d = %f\n", idx ,result[idx]); } return; } void k_compute_test( thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, int size){ HighResClock hr_clock; double timed; dim3 block_size((size + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size(BLOCK_SIZE_1D, 1, 1); std::cout<< csr_ptr_d.size()<<" "<<csr_val_d.size()<<" size:"<< size <<std::endl; int* csr_ptr_d_raw_ptr = thrust::raw_pointer_cast(csr_ptr_d.data()); T* csr_val_d_raw_ptr = thrust::raw_pointer_cast(csr_val_d.data()); thrust::device_vector<T> k_d(size); T* k_d_raw_cast_ptr = thrust::raw_pointer_cast(k_d.data()); hr_clock.start(); kernal_test<<<block_size,grid_size>>>(size , csr_ptr_d_raw_ptr, csr_val_d_raw_ptr, 0, k_d_raw_cast_ptr); CUDA_CALL(cudaDeviceSynchronize()); // nvlouvain::display_vec(k_d); hr_clock.stop(&timed); double raw_ptr_time(timed); thrust::device_vector<T> k_iter_d(size); T* k_iter_d_raw_ptr = thrust::raw_pointer_cast(k_iter_d.data()); hr_clock.start(); kernal_test_iter<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_val_d.begin(), 0, k_iter_d_raw_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double iter_time(timed); // nvlouvain::display_vec(k_iter_d); thrust::device_vector<T> k_d_ptr_d(size); hr_clock.start(); kernal_test_dev_ptr<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_val_d.begin(), 0, k_d_ptr_d.data()); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double dev_ptr_time(timed); // nvlouvain::display_vec(k_d_ptr_d); std::cout<<"raw_ptr_runtime: "<<raw_ptr_time<<"\niter_time: "<<iter_time<<"\ndev_ptr_time: "<<dev_ptr_time<<std::endl; std::cout<<"============== complete k computation test =============\n"; } */
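The commented-out benchmark above compares three ways of handing CSR data to a kernel that calls nvlouvain::compute_k (raw pointers, thrust iterators, thrust device pointers). As a point of reference, the quantity being computed is assumed here to be the weighted degree of each vertex, which is the k_i term of the Louvain formulas; the device signature above is the authoritative interface, this is only a host-side sketch.

#include <vector>

// Host-side sketch: k[i] is the sum of the weights of the edges stored for
// vertex i in the CSR arrays (its weighted degree).
std::vector<double> compute_k_host(int n_vertex,
                                   const std::vector<int>& csr_ptr,
                                   const std::vector<double>& csr_val) {
  std::vector<double> k(n_vertex, 0.0);
  for (int i = 0; i < n_vertex; ++i)
    for (int e = csr_ptr[i]; e < csr_ptr[i + 1]; ++e)
      k[i] += csr_val[e];
  return k;
}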
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/k_in_test.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <string> #include "test_opt_utils.h" #include "graph_utils.cuh" #include "louvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include "util.cuh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> template< typename IdxIter, typename ValIter, typename ValType > __global__ void kernal_k_in_test(const int size, IdxIter csr_ptr_iter, IdxIter csr_ind_iter, ValIter csr_val_iter, IdxIter cluster_iter, int i, ValType* result){ /* //printf("successfully launch kernal\n"); int idx_x = blockDim.x*blockIdx.x + threadIdx.x; int idx_y = blockDim.y*blockIdx.y + threadIdx.y; if(idx_x < size && idx_y < size ){ int c = *( cluster_iter + idx_y); //printf(" ** %d %d\n", idx_x, idx_y); //printf("start compute k with iter passing. (%d, %d, %d) idx = %d %f\n", blockDim.x, blockIdx.x, threadIdx.x, idx, result[idx]); nvlouvain::compute_k_i_in(size, csr_ptr_iter, csr_ind_iter, csr_val_iter, cluster_iter, c, idx_x, &result[idx_x *size + idx_y ]); // n_vertex, csr_ptr_iter, csr_idx_iter, csr_val_iter, cluster_iter, c, i, result printf("k_%d_in_c%d = %f\n", idx_x, idx_y ,result[idx_x *size + idx_y]); } */ /* if(idx == 0){ nvlouvain::display_vec(csr_ptr_iter, size); nvlouvain::display_vec(csr_ind_iter, csr_ptr_iter[size]); nvlouvain::display_vec(csr_val_iter, csr_ptr_iter[size]); } */ return; } void k_i_in_compute_test( thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, int size){ HighResClock hr_clock; double timed; dim3 block_size((size + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, (size + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, 1); dim3 grid_size(BLOCK_SIZE_2D, BLOCK_SIZE_2D, 1); std::cout<< csr_ptr_d.size()<<" "<<csr_val_d.size()<<" size:"<< size <<std::endl; thrust::device_vector<T> result_d(size * size); thrust::device_vector<int> cluster_d(size); T* result_ptr = thrust::raw_pointer_cast(result_d.data()); hr_clock.start(); int i = 0; std::cout<<"successfully declair device vector.\n"; kernal_k_in_test<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), i, result_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double iter_time(timed); nvlouvain::display_vec(result_d); std::cout<<"k_i_in runtime: "<<iter_time<<"\n"; std::cout<<"============== complete k_i_in computation test =============\n"; } /* void k_i_in_compute_for_each_with_functor(thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, int size){ for_each_n() }*/
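For orientation, compute_k_i_in in the kernel above is assumed to accumulate, for a vertex i and a cluster c, the weight of i's edges whose other endpoint lies in c; that is the k_{i,in} term of the Louvain update. A host-side sketch under that assumption (not the nvlouvain device code):

#include <vector>

// Host-side sketch: total weight of edges from vertex i into cluster c.
double k_i_in_host(int i, int c,
                   const std::vector<int>& csr_ptr,
                   const std::vector<int>& csr_ind,
                   const std::vector<double>& csr_val,
                   const std::vector<int>& cluster) {
  double w = 0.0;
  for (int e = csr_ptr[i]; e < csr_ptr[i + 1]; ++e)
    if (cluster[csr_ind[e]] == c)
      w += csr_val[e];
  return w;
}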
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/mem_test.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <string> #include "test_opt_utils.h" #include "graph_utils.cuh" #include "louvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include "util.cuh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/memory.h> template<typename IdxType=int, typename ValType=double> __global__ void kernel_local_mem(const int n_vertex ){ thrust::device_system_tag device_sys; thrust::pointer<ValType,thrust::device_system_tag> temp_i = thrust::malloc<ValType>(device_sys, n_vertex); // for weight on i and for sum_k thrust::pointer<IdxType,thrust::device_system_tag> temp_idx = thrust::malloc<IdxType>(device_sys, n_vertex); // for weight on i and for sum_k *temp_i = 10.0; *(temp_i + n_vertex-1) = 100.5; thrust::return_temporary_buffer(device_sys, temp_idx); thrust::return_temporary_buffer(device_sys, temp_i); } template<typename IdxType=int, typename ValType=double> __global__ void kernel_local_mem_new(const int n_vertex ){ ValType * temp_i = new ValType[n_vertex]; IdxType * temp_idx = new IdxType[n_vertex]; *temp_i = 10.0; *(temp_i + n_vertex-1) = 100.5; thrust::sequence(thrust::cuda::par, temp_idx, temp_idx + n_vertex); printf("%d %d %d ... %d\n",*temp_idx, *(temp_idx+1), *(temp_idx+2), *(temp_idx + n_vertex - 1) ); delete [] temp_i; delete [] temp_idx; } void mem_allocate_test(const int size){ HighResClock hr_clock; double timed; dim3 block_size((size + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size(BLOCK_SIZE_1D, 1, 1); hr_clock.start(); kernel_local_mem<<<block_size,grid_size>>>(30000); kernel_local_mem_new<<<block_size,grid_size>>>(30000); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double raw_ptr_time(timed); std::cout<<"allocate_mem_runtime: "<<raw_ptr_time<<std::endl; }
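kernel_local_mem_new above relies on in-kernel operator new[], which draws from the device malloc heap; with 30000-element double and int buffers per thread the default heap (8 MB) can easily be exhausted, in which case the allocations return null. A host-side sketch of enlarging the heap before such launches follows; the 256 MB figure is an arbitrary example value, to be sized to the actual launch.

#include <cuda_runtime.h>
#include <cstdio>

int main() {
  // Enlarge the device malloc heap used by in-kernel new/new[] and malloc.
  size_t heap_bytes = size_t(256) << 20;   // 256 MB, example value only
  if (cudaDeviceSetLimit(cudaLimitMallocHeapSize, heap_bytes) != cudaSuccess)
    std::printf("cudaDeviceSetLimit failed\n");
  size_t actual = 0;
  cudaDeviceGetLimit(&actual, cudaLimitMallocHeapSize);
  std::printf("device malloc heap: %zu bytes\n", actual);
  return 0;
}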
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/phase_1_test.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <string> #include "test_opt_utils.h" #include "graph_utils.cuh" #include "louvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include "util.cuh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> template<typename IdxIter, typename ValIter, typename IdxType=int, typename ValType> __global__ void kernel_phase_1(const int n_vertex, IdxIter csr_ptr_iter, IdxIter csr_ind_iter, ValIter csr_val_iter, IdxIter cluster, ValType *matrix, IdxType *cluster_sizes, ValType* improve, IdxType* n_moved){ *n_moved = 0; IdxType j = blockIdx.x * blockDim.x + threadIdx.x; IdxType i = blockIdx.y * blockDim.y + threadIdx.y; if( i< n_vertex && j<n_vertex){ //printf("i:%d j:%d start:%d end:%d c:%d\n",i,j,start_idx, end_idx,c); nvlouvain::phase_1( n_vertex, csr_ptr_iter, csr_ind_iter, csr_val_iter, cluster, i, j, j, matrix, cluster_sizes, improve, n_moved); } } void phase_1_test(thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, const int size){ /* HighResClock hr_clock; double timed; dim3 block_size((size + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, (size + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, 1); dim3 grid_size(BLOCK_SIZE_2D, BLOCK_SIZE_2D, 1); thrust::device_vector<int> cluster_d(size); thrust::sequence(cluster_d.begin(), cluster_d.end()); std::cout<<"old cluster: "; //nvlouvain::display_vec(cluster_d); thrust::device_vector<T> Q_d(1); T* Q_d_raw_ptr = thrust::raw_pointer_cast(Q_d.data()); thrust::device_vector<T> matrix(size*size); T* matrix_raw_ptr = thrust::raw_pointer_cast(matrix.data()); hr_clock.start(); kernel_modularity<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), matrix_raw_ptr, Q_d_raw_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double mod_time(timed); std::cout<<"modularity: "<<Q_d[0]<<" runtime: "<<mod_time<<std::endl; thrust::device_vector<T> improve_d(1); T* improve_d_raw_ptr = thrust::raw_pointer_cast(improve_d.data()); thrust::device_vector<int> c_size_d(size, 1); int* c_size_d_raw_ptr = thrust::raw_pointer_cast(c_size_d.data()); thrust::device_vector<int> n_moved(1, 0); int* n_moved_ptr = thrust::raw_pointer_cast(n_moved.data()); //-------------------------------- 1st - thrust::device_vector<T> Q_old(Q_d); double delta_Q; int count = 0; int num_move = 0; do{ Q_old[0] = Q_d[0]; hr_clock.start(); kernel_phase_1<<<block_size, grid_size>>>(size, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), matrix_raw_ptr, c_size_d_raw_ptr, improve_d_raw_ptr, n_moved_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); mod_time = timed; std::cout<<"new cluster: "; //nvlouvain::display_vec(cluster_d); std::cout<<"improvement: "<<improve_d[0]<<" runtime: "<<mod_time<<std::endl; kernel_modularity<<<block_size, grid_size>>>(size, 
csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), matrix_raw_ptr, Q_d_raw_ptr); CUDA_CALL(cudaDeviceSynchronize()); delta_Q = Q_d[0] - Q_old[0]; std::cout<<"new modularity: "<<Q_d[0]<<" delta_Q:"<<delta_Q<<" runtime: "<<mod_time<<std::endl; std::cout<<"cluster size: "; nvlouvain::display_vec(c_size_d); int sum = thrust::reduce(thrust::cuda::par, c_size_d.begin(), c_size_d.end(), 0); num_move = n_moved[0]; std::cout<<"sum: "<< sum<<" moved: "<<num_move<<std::endl; ++count; }while( num_move > 0 && count <5); */ }
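The driver loop sketched in the commented-out code above repeatedly applies phase 1 (greedy vertex moves) and re-evaluates modularity until no vertex moves or an iteration cap is reached. For context, the per-vertex decision in generic Louvain phase 1 looks like the host-side sketch below; it states the standard gain rule, not the exact arithmetic of nvlouvain::phase_1, whose signature above is the authoritative interface.

#include <vector>
#include <unordered_map>

// Generic Louvain phase-1 move rule for one vertex (host-side sketch).
// k[i]     : weighted degree of vertex i
// k_tot[c] : summed weighted degree of community c
// m2       : total edge weight (2m)
// After conceptually removing i from its community, joining community c changes
// modularity by dQ(c) = k_i_in(c)/m - k_tot(c)*k[i]/(2*m*m), so the best target
// maximizes k_i_in(c) - k_tot(c)*k[i]/m2.
int best_move(int i, double m2,
              const std::vector<int>& csr_ptr,
              const std::vector<int>& csr_ind,
              const std::vector<double>& csr_val,
              const std::vector<int>& cluster,
              const std::vector<double>& k,
              std::vector<double> k_tot) {           // copied so i can be removed
  int cur = cluster[i];
  k_tot[cur] -= k[i];                                // take i out of its community
  std::unordered_map<int, double> k_i_in;            // weight from i into each neighbouring community
  k_i_in[cur] = 0.0;                                 // staying put is always a candidate
  for (int e = csr_ptr[i]; e < csr_ptr[i + 1]; ++e)
    if (csr_ind[e] != i)
      k_i_in[cluster[csr_ind[e]]] += csr_val[e];
  int best_c = cur;
  double best_gain = k_i_in[cur] - k_tot[cur] * k[i] / m2;
  for (const auto& p : k_i_in) {
    double gain = p.second - k_tot[p.first] * k[i] / m2;
    if (gain > best_gain) { best_gain = gain; best_c = p.first; }
  }
  return best_c;
}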
0
rapidsai_public_repos/nvgraph/cpp/include
rapidsai_public_repos/nvgraph/cpp/include/test/modularity_test.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <fstream> #include <string> #include "test_opt_utils.h" #include "graph_utils.cuh" #include "louvain.cuh" #include "gtest/gtest.h" #include "high_res_clock.h" #include "util.cuh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> void modularity_test_no_matrix(thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, const int size, const bool weighted){ HighResClock hr_clock; double timed; dim3 block_size((size + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size(BLOCK_SIZE_1D, 1, 1); std::cout<<"n_vertex: "<<size<<std::endl; std::vector<int> cluster; thrust::device_vector<int> cluster_d(size); // thrust::sequence(cluster_d.begin(), cluster_d.end()); // std::cout<<"cluster: "; //nvlouvain::display_vec(cluster_d); thrust::device_vector<T> score(1); thrust::device_vector<T> k_vec(size); thrust::device_vector<T> Q_arr(size); thrust::device_vector<T> temp_i(csr_ptr_d[size]); thrust::device_vector<int> cluster_inv_ptr(size+1); thrust::device_vector<int> cluster_inv_ind(size); thrust::sequence(thrust::cuda::par, cluster_inv_ptr.begin(), cluster_inv_ptr.end()); thrust::sequence(thrust::cuda::par, cluster_inv_ind.begin(), cluster_inv_ind.end()); thrust::fill(thrust::device, temp_i.begin(), temp_i.end(), 0.0); // nvlouvain::display_vec(temp_i); T* score_ptr = thrust::raw_pointer_cast(score.data()); T* k_vec_ptr = thrust::raw_pointer_cast(k_vec.data()); T* Q_arr_ptr = thrust::raw_pointer_cast(Q_arr.data()); T* temp_i_ptr = thrust::raw_pointer_cast(temp_i.data()); int* csr_ptr_ptr = thrust::raw_pointer_cast(csr_ptr_d.data()); int* csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data()); T* csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data()); int* cluster_inv_ptr_ptr = thrust::raw_pointer_cast(cluster_inv_ptr.data()); int* cluster_inv_ind_ptr = thrust::raw_pointer_cast(cluster_inv_ind.data()); int* cluster_ptr = thrust::raw_pointer_cast(cluster_d.data()); hr_clock.start(); T m2 = thrust::reduce(thrust::cuda::par, csr_val_d.begin(), csr_val_d.end()); nvlouvain::generate_cluster_inv(size, size, cluster_d.begin(), cluster_inv_ptr, cluster_inv_ind); double Q = nvlouvain::modularity(size, csr_ptr_d[size], size, m2, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, temp_i_ptr); /* nvlouvain::kernel_modularity_no_matrix<<<block_size, grid_size >>>(size, size, m2, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), cluster_inv_ptr.begin(), cluster_inv_ind.begin(), weighted, k_vec_ptr, Q_arr_ptr, temp_i_ptr, score_ptr); CUDA_CALL(cudaDeviceSynchronize()); double Q = score[0]; */ hr_clock.stop(&timed); double mod_time(timed); printf("modularity(w/o block): %.10e runtime: ",Q); std::cout<<mod_time<<std::endl; /* for(auto const & it:Q_arr) { 
std::cout<<it<<" ,"; } std::cout<<std::endl; */ } void modularity_test_no_matrix_block(thrust::device_vector<int> &csr_ptr_d, thrust::device_vector<int> &csr_ind_d, thrust::device_vector<T> &csr_val_d, const int size, const bool weighted){ HighResClock hr_clock; double timed; dim3 block_size((size + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size(BLOCK_SIZE_1D, 1, 1); std::cout<<"n_vertex: "<<size<<std::endl; thrust::device_vector<int> cluster_d(size); thrust::sequence(cluster_d.begin(), cluster_d.end()); //std::cout<<"cluster: "; //nvlouvain::display_vec(cluster_d); thrust::device_vector<T> score(1); thrust::device_vector<T> k_vec(size); thrust::device_vector<T> Q_arr(size); T* score_ptr = thrust::raw_pointer_cast(score.data()); T* k_vec_ptr = thrust::raw_pointer_cast(k_vec.data()); T* Q_arr_ptr = thrust::raw_pointer_cast(Q_arr.data()); int n_edges = csr_ptr_d[size]; T m2 = thrust::reduce(thrust::cuda::par, csr_val_d.begin(), csr_val_d.end()+ n_edges); hr_clock.start(); nvlouvain::kernel_modularity_no_matrix_block<<<block_size, grid_size>>>(size, m2, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), weighted, k_vec_ptr, Q_arr_ptr); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); double mod_time(timed); double Q = thrust::reduce(thrust::cuda::par, Q_arr_ptr, Q_arr_ptr + size, (0.0)); printf("modularity(w/ block): %.10e runtime: ",Q); std::cout<<mod_time<<std::endl; /* for(auto const & it:Q_arr) { std::cout<<it<<" ,"; } std::cout<<std::endl; */ } /* void modularity_test_no_matrix(std::string file_name){ HighResClock hr_clock; double timed; std::ifstream inf(file_name); thrust::device_vector<int> csr_ptr_d; thrust::device_vector<int> csr_ind_d, thrust::device_vector<T> csr_val_d; const int size; bool weighted = truel dim3 block_size((size + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size(BLOCK_SIZE_1D, 1, 1); std::cout<<"n_vertex: "<<size<<std::endl; std::vector<int> cluster; thrust::device_vector<int> cluster_d(size); // thrust::sequence(cluster_d.begin(), cluster_d.end()); // std::cout<<"cluster: "; //nvlouvain::display_vec(cluster_d); thrust::device_vector<T> score(1); thrust::device_vector<T> k_vec(size); thrust::device_vector<T> Q_arr(size); thrust::device_vector<T> temp_i(csr_ptr_d[size]); thrust::device_vector<int> cluster_inv_ptr(size+1); thrust::device_vector<int> cluster_inv_ind(size); thrust::sequence(thrust::cuda::par, cluster_inv_ptr.begin(), cluster_inv_ptr.end()); thrust::sequence(thrust::cuda::par, cluster_inv_ind.begin(), cluster_inv_ind.end()); thrust::fill(thrust::device, temp_i.begin(), temp_i.end(), 0.0); // nvlouvain::display_vec(temp_i); T* score_ptr = thrust::raw_pointer_cast(score.data()); T* k_vec_ptr = thrust::raw_pointer_cast(k_vec.data()); T* Q_arr_ptr = thrust::raw_pointer_cast(Q_arr.data()); T* temp_i_ptr = thrust::raw_pointer_cast(temp_i.data()); hr_clock.start(); T m2 = thrust::reduce(thrust::cuda::par, csr_val_d.begin(), csr_val_d.end()); nvlouvain::generate_cluster_inv(size, c_size, cluster_d.begin(), cluster_inv_ptr, cluster_inv_ind); double Q = nvlouvain::modularity(size, size, m2, csr_ptr_d, csr_ind_d, csr_val_d, cluster_d, cluster_inv_ptr, cluster_inv_ind, weighted, k_vec_ptr, Q_arr_ptr, temp_i_ptr); nvlouvain::kernel_modularity_no_matrix<<<block_size, grid_size >>>(size, size, m2, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), cluster_inv_ptr.begin(), cluster_inv_ind.begin(), weighted, k_vec_ptr, Q_arr_ptr, temp_i_ptr, score_ptr); CUDA_CALL(cudaDeviceSynchronize()); 
double Q = score[0]; hr_clock.stop(&timed); double mod_time(timed); printf("modularity(w/o block): %.10e runtime: ",Q); std::cout<<mod_time<<std::endl; } */
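Both tests above evaluate the same quantity through different kernels: the weighted Newman modularity of the current clustering. As a host-side reference, here is a minimal sketch, assuming the CSR stores each undirected edge in both directions so that the sum of csr_val equals 2m, which matches how m2 is computed above.

#include <algorithm>
#include <vector>

// Host-side reference sketch: Q = sum_c [ w_in(c)/2m - (k_tot(c)/2m)^2 ],
// where w_in(c) is the weight of edges internal to cluster c (counted in both
// directions) and k_tot(c) the summed weighted degree of its vertices.
double modularity_host(int n_vertex,
                       const std::vector<int>& csr_ptr,
                       const std::vector<int>& csr_ind,
                       const std::vector<double>& csr_val,
                       const std::vector<int>& cluster) {
  double m2 = 0.0;                                   // 2m: total stored edge weight
  for (double w : csr_val) m2 += w;
  int c_size = 1 + *std::max_element(cluster.begin(), cluster.end());
  std::vector<double> w_in(c_size, 0.0), k_tot(c_size, 0.0);
  for (int i = 0; i < n_vertex; ++i)
    for (int e = csr_ptr[i]; e < csr_ptr[i + 1]; ++e) {
      k_tot[cluster[i]] += csr_val[e];
      if (cluster[i] == cluster[csr_ind[e]])
        w_in[cluster[i]] += csr_val[e];
    }
  double q = 0.0;
  for (int c = 0; c < c_size; ++c)
    q += w_in[c] / m2 - (k_tot[c] / m2) * (k_tot[c] / m2);
  return q;
}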
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_2d_bfs_net.cpp
// This is gtest application that contains all of the C API tests. Parameters: // nvgraph_capi_tests [--perf] [--stress-iters N] [--gtest_filter=NameFilterPatter] // It also accepts any other gtest (1.7.0) default parameters. // Right now this application contains: // 1) Sanity Check tests - tests on simple examples with known answer (or known behaviour) // 2) Correctness checks tests - tests on real graph data, uses reference algorithm // (CPU code for SrSPMV and python scripts for other algorithms, see // python scripts here: //sw/gpgpu/nvgraph/test/ref/) with reference results, compares those two. // It also measures performance of single algorithm C API call, enf enabled (see below) // 3) Corner cases tests - tests with some bad inputs, bad parameters, expects library to handle // it gracefully // 4) Stress tests - makes sure that library result is persistent throughout the library usage // (a lot of C API calls). Also makes some assumptions and checks on memory usage during // this test. // // We can control what tests to launch by using gtest filters. For example: // Only sanity tests: // ./nvgraph_capi_tests_traversal --gtest_filter=*Sanity* // And, correspondingly: // ./nvgraph_capi_tests_traversal --gtest_filter=*Correctness* // ./nvgraph_capi_tests_traversal --gtest_filter=*Corner* // ./nvgraph_capi_tests_traversal --gtest_filter=*Stress* // Or, combination: // ./nvgraph_capi_tests_traversal --gtest_filter=*Sanity*:*Correctness* // // Performance reports are provided in the ERIS format and disabled by default. // Could be enabled by adding '--perf' to the command line. I added this parameter to vlct // // Parameter '--stress-iters N', which gives multiplier (not an absolute value) for the number of launches for stress tests // #include <utility> #include "gtest/gtest.h" #include "nvgraph_test_common.h" #include "valued_csr_graph.hxx" #include "readMatrix.hxx" #include "nvgraphP.h" #include "nvgraph.h" #include <nvgraph_experimental.h> // experimental header, contains hidden API entries, can be shared only under special circumstances without reveling internal things #include "stdlib.h" #include <algorithm> #include <numeric> #include <queue> #include <sstream> #include <cstdint> #include <math.h> #include "cuda_profiler_api.h" // do the perf measurements, enabled by command line parameter '--perf' static int PERF = 0; // minimum vertices in the graph to perform perf measurements #define PERF_ROWS_LIMIT 10000 // number of repeats = multiplier/num_vertices #define Traversal_ITER_MULTIPLIER 30000000 template<typename T> struct nvgraph_Const; template<> struct nvgraph_Const<int> { static const cudaDataType_t Type = CUDA_R_32I; static const int inf; }; const int nvgraph_Const<int>::inf = INT_MAX; typedef struct Traversal_Usecase_t { std::string graph_file; int source_vert; size_t n; size_t nnz; bool useMask; bool undirected; Traversal_Usecase_t(const std::string& a, int source, size_t _n, size_t _nnz, bool _useMask = false, bool _undirected = false) : source_vert(source), n(_n), nnz(_nnz), useMask(_useMask), undirected(_undirected) { graph_file = a; }; Traversal_Usecase_t& operator=(const Traversal_Usecase_t& rhs) { graph_file = rhs.graph_file; source_vert = rhs.source_vert; n = rhs.n; nnz = rhs.nnz; useMask = rhs.useMask; return *this; } } Traversal_Usecase; //// Traversal tests class NVGraphCAPITests_2d_bfs: public ::testing::TestWithParam<Traversal_Usecase> { public: NVGraphCAPITests_2d_bfs() : handle(NULL) { } static void SetupTestCase() { } static void TearDownTestCase() { } 
virtual void SetUp() { if (handle == NULL) { char* nvgraph_gpus = getenv("NVGRAPH_GPUS"); if (nvgraph_gpus) printf("Value of NVGRAPH_GPUS=%s\n", nvgraph_gpus); else printf("Value of NVGRAPH_GPUS is null\n"); std::vector<int32_t> gpus; int32_t dummy; std::stringstream ss(nvgraph_gpus); while (ss >> dummy){ gpus.push_back(dummy); if (ss.peek() == ',') ss.ignore(); } printf("There were %d devices found: ", (int)gpus.size()); for (int i = 0; i < gpus.size(); i++) std::cout << gpus[i] << " "; std::cout << "\n"; devices = (int32_t*) malloc(sizeof(int32_t) * gpus.size()); for (int i = 0; i < gpus.size(); i++) devices[i] = gpus[i]; numDevices = gpus.size(); status = nvgraphCreateMulti(&handle, numDevices, devices); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; if (devices) free(devices); } } nvgraphStatus_t status; nvgraphHandle_t handle; int32_t *devices; int32_t numDevices; template<typename EdgeT> void run_current_test(const Traversal_Usecase& param) { const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.source_vert; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + std::string("_") + ss.str().c_str(); nvgraphTopologyType_t topo = NVGRAPH_2D_32I_32I; nvgraphStatus_t status; // Read in graph from network file std::vector<int32_t> sources; std::vector<int32_t> destinations; readNetworkFile(param.graph_file.c_str(), param.nnz, sources, destinations); // Create graph handle nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph int n = param.n; int nnz = param.nnz; int blockN = std::max(2,(int)ceil(sqrt(numDevices))); std::cout << "Using " << blockN << " as block N\n"; nvgraph2dCOOTopology32I_st topology = { n, nnz, &sources[0], &destinations[0], CUDA_R_32I, NULL, blockN, devices, numDevices, NVGRAPH_DEFAULT }; status = nvgraphSetGraphStructure(handle, g1, (void*) &topology, topo); // set up graph data std::vector<int> calculated_distances_res(n); std::vector<int> calculated_predecessors_res(n); int source_vert = param.source_vert; std::cout << "Starting from vertex: " << source_vert << "\n"; cudaProfilerStart(); status = nvgraph2dBfs(handle, g1, source_vert, &calculated_distances_res[0], &calculated_predecessors_res[0]); cudaProfilerStop(); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDeviceSynchronize(); if (PERF && n > PERF_ROWS_LIMIT) { double start, stop; start = second(); int repeat = 30; for (int i = 0; i < repeat; i++) { status = nvgraph2dBfs(handle, g1, source_vert, &calculated_distances_res[0], &calculated_predecessors_res[0]); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } cudaDeviceSynchronize(); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0 * (stop - start) / repeat); } ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //Checking distances int visitedCount = 0; for (int i = 0; i < n; ++i) { if (calculated_distances_res[i] != -1) visitedCount++; } std::cout << "There were " << visitedCount << " vertices visited.\n"; status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_2d_bfs, CheckResult) { run_current_test<float>(GetParam()); } INSTANTIATE_TEST_CASE_P(CorrectnessCheck, 
NVGraphCAPITests_2d_bfs, ::testing::Values( Traversal_Usecase("/mnt/nvgraph_test_data/Rmat100Mvertices2Bedges.net", 3, 100000000, 2000000000) )); int main(int argc, char **argv) { for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--perf") == 0) PERF = 1; } srand(42); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
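The correctness check above only counts vertices whose distance differs from -1 (the value used for unreachable vertices). A slightly stronger host-side consistency check is sketched below with a hypothetical helper name, assuming the usual BFS predecessor convention: every visited non-source vertex should have a visited predecessor exactly one level closer to the source.

#include <vector>

// Hypothetical helper (not part of the test above): check BFS
// distance/predecessor consistency. dist[v] == -1 marks unvisited vertices.
bool check_bfs_tree(int source,
                    const std::vector<int>& dist,
                    const std::vector<int>& pred) {
  for (int v = 0; v < (int)dist.size(); ++v) {
    if (v == source || dist[v] == -1) continue;      // skip source and unreached vertices
    int p = pred[v];
    if (p < 0 || p >= (int)dist.size()) return false;
    if (dist[p] == -1 || dist[v] != dist[p] + 1) return false;
  }
  return true;
}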
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/mm.hxx
#pragma once #include <stdio.h> extern "C" { #include "mmio.h" } #include <thrust/sort.h> /// Read matrix properties from Matrix Market file /** Matrix Market file is assumed to be a sparse matrix in coordinate * format. * * @param f File stream for Matrix Market file. * @param tg Boolean indicating whether to convert matrix to general * format (from symmetric, Hermitian, or skew symmetric format). * @param t (Output) MM_typecode with matrix properties. * @param m (Output) Number of matrix rows. * @param n (Output) Number of matrix columns. * @param nnz (Output) Number of non-zero matrix entries. * @return Zero if properties were read successfully. Otherwise * non-zero. */ template <typename IndexType_> int mm_properties(FILE * f, int tg, MM_typecode * t, IndexType_ * m, IndexType_ * n, IndexType_ * nnz) { // Read matrix properties from file int mint, nint, nnzint; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(*t) || !mm_is_coordinate(*t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&mint,&nint,&nnzint)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(*t) && !mm_is_real(*t) && !mm_is_integer(*t) && !mm_is_complex(*t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } *m = mint; *n = nint; *nnz = nnzint; // Find total number of non-zero entries if(tg && !mm_is_general(*t)) { // Non-diagonal entries should be counted twice IndexType_ nnzOld = *nnz; *nnz *= 2; // Diagonal entries should not be double-counted int i; int st; for(i=0; i<nnzOld; ++i) { // Read matrix entry IndexType_ row, col; double rval, ival; if (mm_is_pattern(*t)) st = fscanf(f, "%d %d\n", &row, &col); else if (mm_is_real(*t) || mm_is_integer(*t)) st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Check if entry is diagonal if(row == col) --(*nnz); } } return 0; } /// Read Matrix Market file and convert to COO format matrix /** Matrix Market file is assumed to be a sparse matrix in coordinate * format. * * @param f File stream for Matrix Market file. * @param tg Boolean indicating whether to convert matrix to general * format (from symmetric, Hermitian, or skew symmetric format). * @param nnz Number of non-zero matrix entries. * @param cooRowInd (Output) Row indices for COO matrix. Should have * at least nnz entries. * @param cooColInd (Output) Column indices for COO matrix. Should * have at least nnz entries. * @param cooRVal (Output) Real component of COO matrix * entries. Should have at least nnz entries. Ignored if null * pointer. * @param cooIVal (Output) Imaginary component of COO matrix * entries. Should have at least nnz entries. Ignored if null * pointer. * @return Zero if matrix was read successfully. Otherwise non-zero. 
*/ template <typename IndexType_, typename ValueType_> int mm_to_coo(FILE *f, int tg, IndexType_ nnz, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal , ValueType_ * cooIVal) { // Read matrix properties from file MM_typecode t; int m, n, nnzOld; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,&t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(t) || !mm_is_coordinate(t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&m,&n,&nnzOld)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(t) && !mm_is_real(t) && !mm_is_integer(t) && !mm_is_complex(t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } // Add each matrix entry in file to COO format matrix IndexType_ i; // Entry index in Matrix Market file IndexType_ j = 0; // Entry index in COO format matrix for(i=0;i<nnzOld;++i) { // Read entry from file int row, col; double rval, ival; int st; if (mm_is_pattern(t)) { st = fscanf(f, "%d %d\n", &row, &col); rval = 1.0; ival = 0.0; } else if (mm_is_real(t) || mm_is_integer(t)) { st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); ival = 0.0; } else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Switch to 0-based indexing --row; --col; // Record entry cooRowInd[j] = row; cooColInd[j] = col; if(cooRVal != NULL) cooRVal[j] = rval; if(cooIVal != NULL) cooIVal[j] = ival; ++j; // Add symmetric complement of non-diagonal entries if(tg && !mm_is_general(t) && (row!=col)) { // Modify entry value if matrix is skew symmetric or Hermitian if(mm_is_skew(t)) { rval = -rval; ival = -ival; } else if(mm_is_hermitian(t)) { ival = -ival; } // Record entry cooRowInd[j] = col; cooColInd[j] = row; if(cooRVal != NULL) cooRVal[j] = rval; if(cooIVal != NULL) cooIVal[j] = ival; ++j; } } return 0; } /// Compare two tuples based on the element indexed by i class lesser_tuple { const int i; public: lesser_tuple(int _i) : i(_i) {} template<typename Tuple1, typename Tuple2> __host__ __device__ bool operator()(const Tuple1 t1, const Tuple2 t2) { switch(i) { case 0: return (thrust::get<0>(t1) < thrust::get<0>(t2)); case 1: return (thrust::get<1>(t1) < thrust::get<1>(t2)); default: return (thrust::get<0>(t1) < thrust::get<0>(t2)); } } }; /// Sort entries in COO format matrix /** Sort is stable. * * @param nnz Number of non-zero matrix entries. * @param sort_by_row Boolean indicating whether matrix entries * will be sorted by row index or by column index. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component for COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component COO matrix entries. Ignored if * null pointer. 
*/ template <typename IndexType_, typename ValueType_> void coo_sort(IndexType_ nnz, int sort_by_row, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal, ValueType_ * cooIVal) { // Determine whether to sort by row or by column int i; if(sort_by_row == 0) i = 1; else i = 0; // Apply stable sort using namespace thrust; if((cooRVal==NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz)), lesser_tuple(i)); else if((cooRVal==NULL) && (cooIVal!=NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooIVal+nnz)), lesser_tuple(i)); else if((cooRVal!=NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooRVal+nnz)), lesser_tuple(i)); else stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz, cooRVal+nnz,cooIVal+nnz)), lesser_tuple(i)); } /// Compress sorted list of indices /** For use in converting COO format matrix to CSR or CSC format. * * @param n Maximum index. * @param nnz Number of non-zero matrix entries. * @param sortedIndices Sorted list of indices (COO format). * @param compressedIndices (Output) Compressed list of indices (CSR * or CSC format). Should have at least n+1 entries. */ template <typename IndexType_> void coo_compress(IndexType_ n, IndexType_ nnz, const IndexType_ * __restrict__ sortedIndices, IndexType_ * __restrict__ compressedIndices) { IndexType_ i; // Initialize everything to zero memset(compressedIndices, 0, (n+1)*sizeof(IndexType_)); // Count number of elements per row for(i=0; i<nnz; ++i) ++(compressedIndices[sortedIndices[i]+1]); // Compute cumulative sum to obtain row offsets/pointers for(i=0; i<n; ++i) compressedIndices[i+1] += compressedIndices[i]; } /// Convert COO format matrix to CSR format /** On output, matrix entries in COO format matrix will be sorted * (primarily by row index, secondarily by column index). * * @param m Number of matrix rows. * @param n Number of matrix columns. * @param nnz Number of non-zero matrix entries. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component of COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component of COO matrix entries. Ignored * if null pointer. * @param csrRowPtr Row pointers for CSR matrix. Should have at least * n+1 entries. * @param csrColInd Column indices for CSR matrix (identical to * output of cooColInd). Should have at least nnz entries. Ignored if * null pointer. * @param csrRVal Real component of CSR matrix entries (identical to * output of cooRVal). Should have at least nnz entries. Ignored if * null pointer. * @param csrIVal Imaginary component of CSR matrix entries * (identical to output of cooIVal). Should have at least nnz * entries. Ignored if null pointer. * @return Zero if matrix was converted successfully. Otherwise * non-zero. 
*/ template <typename IndexType_, typename ValueType_> int coo_to_csr(IndexType_ m, IndexType_ n, IndexType_ nnz, IndexType_ * __restrict__ cooRowInd, IndexType_ * __restrict__ cooColInd, ValueType_ * __restrict__ cooRVal, ValueType_ * __restrict__ cooIVal, IndexType_ * __restrict__ csrRowPtr, IndexType_ * __restrict__ csrColInd, ValueType_ * __restrict__ csrRVal, ValueType_ * __restrict__ csrIVal) { // Convert COO to CSR matrix coo_sort(nnz, 0, cooRowInd, cooColInd, cooRVal, cooIVal); coo_sort(nnz, 1, cooRowInd, cooColInd, cooRVal, cooIVal); coo_compress(n, nnz, cooRowInd, csrRowPtr); // Copy arrays if(csrColInd!=NULL) memcpy(csrColInd, cooColInd, nnz*sizeof(IndexType_)); if((cooRVal!=NULL) && (csrRVal!=NULL)) memcpy(csrRVal, cooRVal, nnz*sizeof(ValueType_)); if((cooIVal!=NULL) && (csrIVal!=NULL)) memcpy(csrIVal, cooIVal, nnz*sizeof(ValueType_)); return 0; }
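A minimal usage sketch for the helpers above (the file path and the error handling are placeholders): read a coordinate-format Matrix Market file, expand symmetric, Hermitian, or skew-symmetric storage to general form, and convert it to CSR. Note that csrPtr is sized n+1, following the documentation of coo_to_csr above; for graph inputs m and n coincide.

#include <cstdio>
#include <vector>
// assumes mm.hxx (above) has been included

int read_mm_to_csr_example(const char* path) {
  FILE* f = std::fopen(path, "r");
  if (!f) return -1;
  MM_typecode t;
  int m = 0, n = 0, nnz = 0;
  // nnz returned here already accounts for the symmetric expansion (tg = 1)
  if (mm_properties<int>(f, 1, &t, &m, &n, &nnz)) { std::fclose(f); return -1; }
  std::vector<int> cooRow(nnz), cooCol(nnz), csrPtr(n + 1), csrInd(nnz);
  std::vector<double> cooVal(nnz), csrVal(nnz);
  if (mm_to_coo<int, double>(f, 1, nnz, cooRow.data(), cooCol.data(),
                             cooVal.data(), NULL)) { std::fclose(f); return -1; }
  coo_to_csr<int, double>(m, n, nnz, cooRow.data(), cooCol.data(), cooVal.data(), NULL,
                          csrPtr.data(), csrInd.data(), csrVal.data(), NULL);
  std::fclose(f);
  return 0;
}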
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/mmio.c
/* * Matrix Market I/O library for ANSI C * * See http://math.nist.gov/MatrixMarket for details. * * */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #include "mmio.h" int mm_read_unsymmetric_sparse(const char *fname, int *M_, int *N_, int *nz_, double **val_, int **I_, int **J_) { FILE *f; MM_typecode matcode; int M, N, nz; int i; double *val; int *I, *J; if ((f = fopen(fname, "r")) == NULL) return -1; if (mm_read_banner(f, &matcode) != 0) { printf("mm_read_unsymetric: Could not process Matrix Market banner "); printf(" in file [%s]\n", fname); return -1; } if ( !(mm_is_real(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode))) { fprintf(stderr, "Sorry, this application does not support "); fprintf(stderr, "Market Market type: [%s]\n", mm_typecode_to_str(matcode)); return -1; } /* find out size of sparse matrix: M, N, nz .... */ if (mm_read_mtx_crd_size(f, &M, &N, &nz) !=0) { fprintf(stderr, "read_unsymmetric_sparse(): could not parse matrix size.\n"); return -1; } *M_ = M; *N_ = N; *nz_ = nz; /* reseve memory for matrices */ I = (int *) malloc(nz * sizeof(int)); J = (int *) malloc(nz * sizeof(int)); val = (double *) malloc(nz * sizeof(double)); *val_ = val; *I_ = I; *J_ = J; /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15) */ for (i=0; i<nz; i++) { fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); I[i]--; /* adjust from 1-based to 0-based */ J[i]--; } fclose(f); return 0; } int mm_is_valid(MM_typecode matcode) { if (!mm_is_matrix(matcode)) return 0; if (mm_is_dense(matcode) && mm_is_pattern(matcode)) return 0; if (mm_is_real(matcode) && mm_is_hermitian(matcode)) return 0; if (mm_is_pattern(matcode) && (mm_is_hermitian(matcode) || mm_is_skew(matcode))) return 0; return 1; } int mm_read_banner(FILE *f, MM_typecode *matcode) { char line[MM_MAX_LINE_LENGTH]; char banner[MM_MAX_TOKEN_LENGTH]; char mtx[MM_MAX_TOKEN_LENGTH]; char crd[MM_MAX_TOKEN_LENGTH]; char data_type[MM_MAX_TOKEN_LENGTH]; char storage_scheme[MM_MAX_TOKEN_LENGTH]; char *p; mm_clear_typecode(matcode); if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; if (sscanf(line, "%s %s %s %s %s", banner, mtx, crd, data_type, storage_scheme) != 5) return MM_PREMATURE_EOF; for (p=mtx; *p!='\0'; *p=tolower(*p),p++); /* convert to lower case */ for (p=crd; *p!='\0'; *p=tolower(*p),p++); for (p=data_type; *p!='\0'; *p=tolower(*p),p++); for (p=storage_scheme; *p!='\0'; *p=tolower(*p),p++); /* check for banner */ if (strncmp(banner, MatrixMarketBanner, strlen(MatrixMarketBanner)) != 0) return MM_NO_HEADER; /* first field should be "mtx" */ if (strcmp(mtx, MM_MTX_STR) != 0) return MM_UNSUPPORTED_TYPE; mm_set_matrix(matcode); /* second field describes whether this is a sparse matrix (in coordinate storgae) or a dense array */ if (strcmp(crd, MM_SPARSE_STR) == 0) mm_set_sparse(matcode); else if (strcmp(crd, MM_DENSE_STR) == 0) mm_set_dense(matcode); else return MM_UNSUPPORTED_TYPE; /* third field */ if (strcmp(data_type, MM_REAL_STR) == 0) mm_set_real(matcode); else if (strcmp(data_type, MM_COMPLEX_STR) == 0) mm_set_complex(matcode); else if (strcmp(data_type, MM_PATTERN_STR) == 0) mm_set_pattern(matcode); else if (strcmp(data_type, MM_INT_STR) == 0) mm_set_integer(matcode); else return MM_UNSUPPORTED_TYPE; /* fourth field */ if (strcmp(storage_scheme, MM_GENERAL_STR) == 0) mm_set_general(matcode); else if (strcmp(storage_scheme, 
MM_SYMM_STR) == 0) mm_set_symmetric(matcode); else if (strcmp(storage_scheme, MM_HERM_STR) == 0) mm_set_hermitian(matcode); else if (strcmp(storage_scheme, MM_SKEW_STR) == 0) mm_set_skew(matcode); else return MM_UNSUPPORTED_TYPE; return 0; } int mm_write_mtx_crd_size(FILE *f, int M, int N, int nz) { if (fprintf(f, "%d %d %d\n", M, N, nz) != 3) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_read_mtx_crd_size(FILE *f, int *M, int *N, int *nz ) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = *nz = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line,MM_MAX_LINE_LENGTH,f) == NULL) return MM_PREMATURE_EOF; }while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%d %d %d", M, N, nz) == 3) return 0; else do { num_items_read = fscanf(f, "%d %d %d", M, N, nz); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 3); return 0; } int mm_read_mtx_array_size(FILE *f, int *M, int *N) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line,MM_MAX_LINE_LENGTH,f) == NULL) return MM_PREMATURE_EOF; }while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%d %d", M, N) == 2) return 0; else /* we have a blank line */ do { num_items_read = fscanf(f, "%d %d", M, N); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 2); return 0; } int mm_write_mtx_array_size(FILE *f, int M, int N) { if (fprintf(f, "%d %d\n", M, N) != 2) return MM_COULD_NOT_WRITE_FILE; else return 0; } /*-------------------------------------------------------------------------*/ /******************************************************************/ /* use when I[], J[], and val[]J, and val[] are already allocated */ /******************************************************************/ int mm_read_mtx_crd_data(FILE *f, int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { int i; if (mm_is_complex(matcode)) { for (i=0; i<nz; i++) if (fscanf(f, "%d %d %lg %lg", &I[i], &J[i], &val[2*i], &val[2*i+1]) != 4) return MM_PREMATURE_EOF; } else if (mm_is_real(matcode)) { for (i=0; i<nz; i++) { if (fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]) != 3) return MM_PREMATURE_EOF; } } else if (mm_is_pattern(matcode)) { for (i=0; i<nz; i++) if (fscanf(f, "%d %d", &I[i], &J[i]) != 2) return MM_PREMATURE_EOF; } else return MM_UNSUPPORTED_TYPE; return 0; } int mm_read_mtx_crd_entry(FILE *f, int *I, int *J, double *real, double *imag, MM_typecode matcode) { if (mm_is_complex(matcode)) { if (fscanf(f, "%d %d %lg %lg", I, J, real, imag) != 4) return MM_PREMATURE_EOF; } else if (mm_is_real(matcode)) { if (fscanf(f, "%d %d %lg\n", I, J, real) != 3) return MM_PREMATURE_EOF; } else if (mm_is_pattern(matcode)) { if (fscanf(f, "%d %d", I, J) != 2) return MM_PREMATURE_EOF; } else return MM_UNSUPPORTED_TYPE; return 0; } /************************************************************************ mm_read_mtx_crd() fills M, N, nz, array of values, and return type code, e.g. 
'MCRS' if matrix is complex, values[] is of size 2*nz, (nz pairs of real/imaginary values) ************************************************************************/ int mm_read_mtx_crd(char *fname, int *M, int *N, int *nz, int **I, int **J, double **val, MM_typecode *matcode) { int ret_code; FILE *f; if (strcmp(fname, "stdin") == 0) f=stdin; else if ((f = fopen(fname, "r")) == NULL) return MM_COULD_NOT_READ_FILE; if ((ret_code = mm_read_banner(f, matcode)) != 0) return ret_code; if (!(mm_is_valid(*matcode) && mm_is_sparse(*matcode) && mm_is_matrix(*matcode))) return MM_UNSUPPORTED_TYPE; if ((ret_code = mm_read_mtx_crd_size(f, M, N, nz)) != 0) return ret_code; *I = (int *) malloc(*nz * sizeof(int)); *J = (int *) malloc(*nz * sizeof(int)); *val = NULL; if (mm_is_complex(*matcode)) { *val = (double *) malloc(*nz * 2 * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_real(*matcode)) { *val = (double *) malloc(*nz * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_pattern(*matcode)) { ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } if (f != stdin) fclose(f); return 0; } int mm_write_banner(FILE *f, MM_typecode matcode) { char *str = mm_typecode_to_str(matcode); int ret_code; ret_code = fprintf(f, "%s %s\n", MatrixMarketBanner, str); free(str); if (ret_code !=2 ) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_write_mtx_crd(char fname[], int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { FILE *f; int i; if (strcmp(fname, "stdout") == 0) f = stdout; else if ((f = fopen(fname, "w")) == NULL) return MM_COULD_NOT_WRITE_FILE; /* print banner followed by typecode */ fprintf(f, "%s ", MatrixMarketBanner); fprintf(f, "%s\n", mm_typecode_to_str(matcode)); /* print matrix sizes and nonzeros */ fprintf(f, "%d %d %d\n", M, N, nz); /* print values */ if (mm_is_pattern(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d\n", I[i], J[i]); else if (mm_is_real(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d %20.16g\n", I[i], J[i], val[i]); else if (mm_is_complex(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d %20.16g %20.16g\n", I[i], J[i], val[2*i], val[2*i+1]); else { if (f != stdout) fclose(f); return MM_UNSUPPORTED_TYPE; } if (f !=stdout) fclose(f); return 0; } /** * Create a new copy of a string s. mm_strdup() is a common routine, but * not part of ANSI C, so it is included here. Used by mm_typecode_to_str(). 
* */ char *mm_strdup(const char *s) { int len = strlen(s); char *s2 = (char *) malloc((len+1)*sizeof(char)); return strcpy(s2, s); } char *mm_typecode_to_str(MM_typecode matcode) { char buffer[MM_MAX_LINE_LENGTH]; char *types[4]; char *mm_strdup(const char *); int error =0; /* check for MTX type */ if (mm_is_matrix(matcode)) types[0] = (char*)MM_MTX_STR; else error=1; /* check for CRD or ARR matrix */ if (mm_is_sparse(matcode)) types[1] = (char*)MM_SPARSE_STR; else if (mm_is_dense(matcode)) types[1] = (char*)MM_DENSE_STR; else return NULL; /* check for element data type */ if (mm_is_real(matcode)) types[2] = (char*)MM_REAL_STR; else if (mm_is_complex(matcode)) types[2] = (char*)MM_COMPLEX_STR; else if (mm_is_pattern(matcode)) types[2] = (char*)MM_PATTERN_STR; else if (mm_is_integer(matcode)) types[2] = (char*)MM_INT_STR; else return NULL; /* check for symmetry type */ if (mm_is_general(matcode)) types[3] = (char*)MM_GENERAL_STR; else if (mm_is_symmetric(matcode)) types[3] = (char*)MM_SYMM_STR; else if (mm_is_hermitian(matcode)) types[3] = (char*)MM_HERM_STR; else if (mm_is_skew(matcode)) types[3] = (char*)MM_SKEW_STR; else return NULL; sprintf(buffer,"%s %s %s %s", types[0], types[1], types[2], types[3]); return mm_strdup(buffer); }
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_contraction.cpp
#include <iostream> #include <vector> #include <algorithm> #include <functional> #include <iterator> #include <fstream> #include <cassert> #include <sstream> #include <string> #include <cstdio> #include "gtest/gtest.h" #include "valued_csr_graph.hxx" #include "nvgraphP.h" #include "nvgraph.h" //annonymus: namespace{ template<typename Vector> void fill_contraction_data(const std::string& fname, Vector& g_row_offsets, Vector& g_col_indices, Vector& aggregates, Vector& cg_row_offsets, Vector& cg_col_indices) { typedef typename Vector::value_type T; std::ifstream m_stream(fname.c_str(), std::ifstream::in); std::string line; if( !m_stream.is_open() ) { std::stringstream ss; ss<<"ERROR: Could not open file: "<<fname; throw std::runtime_error(ss.str().c_str()); } bool keep_going = !std::getline(m_stream, line).eof(); //debug: //std::cout<<line<<std::endl; if( !keep_going ) return; char c; int g_nrows=0; int g_nnz=0; std::sscanf(line.c_str(),"%c: nrows=%d, nnz=%d",&c, &g_nrows, &g_nnz); //debug: //std::cout<<c<<","<<g_nrows<<","<<g_nnz<<"\n"; int n_entries = g_nrows+1; g_row_offsets.reserve(n_entries); //ignore next line: // if( !std::getline(m_stream, line) ) return; //read G row_offsets: for(int i=0;(i<n_entries) && keep_going;++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; g_row_offsets.push_back(value); } //ignore next 2 lines: // if( !std::getline(m_stream, line) || !std::getline(m_stream, line) ) return; g_col_indices.reserve(g_nnz); //read G col_indices: for(int i=0;(i<g_nnz) && keep_going;++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; g_col_indices.push_back(value); } //ignore next line: // if( !std::getline(m_stream, line) ) return; //remove the following for extraction: //{ if( !std::getline(m_stream, line) ) return; int n_aggs = 0; std::sscanf(line.c_str(),"aggregate: size=%d",&n_aggs); //assert( n_aggs == g_nrows );//not true for subgraph extraction! 
aggregates.reserve(n_aggs); //read aggregate: for(int i=0;(i<n_aggs) && keep_going;++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; aggregates.push_back(value); } //} end remove code for extraction if( !keep_going || !std::getline(m_stream, line) ) return; int cg_nrows=0; int cg_nnz=0; std::sscanf(line.c_str(),"result %c: nrows=%d, nnz=%d",&c, &cg_nrows, &cg_nnz); //debug: std::cout<<c<<","<<cg_nrows<<","<<cg_nnz<<"\n"; // //m_stream.close();//not really needed...destructor handles this //return; n_entries = cg_nrows+1; cg_row_offsets.reserve(n_entries); //ignore next line: // if( !std::getline(m_stream, line) ) return; //read G row_offsets: for(int i=0;(i<n_entries) && keep_going;++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; cg_row_offsets.push_back(value); } //ignore next 2 lines: // if( !std::getline(m_stream, line) || !std::getline(m_stream, line) ) return; cg_col_indices.reserve(cg_nnz); //read G col_indices: for(int i=0;(i<cg_nnz) && keep_going;++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; cg_col_indices.push_back(value); } m_stream.close();//not really needed...destructor handles this } template<typename Vector> bool check_diffs(const Vector& v1, const Vector& v2) { typedef typename Vector::value_type T; Vector v(v1.size(), 0); std::transform(v1.begin(), v1.end(), v2.begin(), v.begin(), std::minus<T>()); if( std::find_if(v.begin(), v.end(), std::bind2nd(std::not_equal_to<T>(), 0)) != v.end() ) return true; else return false; } //check if sort(delta(r1)) == sort(delta(r2)) //where delta(r)={r[i+1]-r[i] | i <- [0..|r|-1]} // template<typename Vector> bool check_delta_invariant(const Vector& r1, const Vector& r2) { typedef typename Vector::value_type T; size_t sz = r1.size(); assert( sz == r2.size() ); Vector d1(sz-1); std::transform(r1.begin()+1, r1.end(), r1.begin(), d1.begin(), std::minus<int>()); Vector d2(sz-1); std::transform(r2.begin()+1, r2.end(), r2.begin(), d2.begin(), std::minus<int>()); std::sort(d1.begin(), d1.end()); std::sort(d2.begin(), d2.end()); return (d1 == d2); } } class NvgraphCAPITests_ContractionCSR : public ::testing::Test { public: NvgraphCAPITests_ContractionCSR() : nvgraph_handle(NULL), initial_graph(NULL) {} protected: static void SetupTestCase() { } static void TearDownTestCase() { } virtual void SetUp() { if (nvgraph_handle == NULL) { status = nvgraphCreate(&nvgraph_handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // set up graph status = nvgraphCreateGraphDescr(nvgraph_handle, &initial_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st topoData; topoData.nvertices = 5; topoData.nedges = 9; int neighborhood[] = {0, 2, 3, 5, 7, 9}; //row_offsets int edgedest[] = {1, 3, 3, 1, 4, 0, 2, 2, 4};//col_indices topoData.source_offsets = neighborhood; topoData.destination_indices = edgedest; status = nvgraphSetGraphStructure(nvgraph_handle, initial_graph,(void*) &topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph data size_t numsets = 2; float vertexvals0[] = {0.1f, 0.15893e-20f, 1e27f, 13.2f, 0.f}; float vertexvals1[] = {13., 322.64, 1e28, -1.4, 22.3}; void* vertexptr[] = {(void*)vertexvals0, (void*)vertexvals1}; cudaDataType_t type_v[] = {CUDA_R_32F, CUDA_R_32F}; float edgevals0[] = {0.1f, 0.9153e-20f, 0.42e27f, 185.23, 1e21f, 15.6f, 215.907f, 912.2f, 0.2f}; float edgevals1[] = {13., 322.64, 1e28, 197534.2, 0.1, 
0.425e-5, 5923.4, 0.12e-12, 52.}; void* edgeptr[] = {(void*)edgevals0, (void*)edgevals1}; cudaDataType_t type_e[] = {CUDA_R_32F, CUDA_R_32F}; status = nvgraphAllocateVertexData(nvgraph_handle, initial_graph, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(nvgraph_handle, initial_graph, (void *)vertexptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(nvgraph_handle, initial_graph, (void *)vertexptr[1], 1 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(nvgraph_handle, initial_graph, numsets, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(nvgraph_handle, initial_graph, (void *)edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(nvgraph_handle, initial_graph, (void *)edgeptr[1], 1 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //save data - those will be available in the tests directly graph_neigh.assign(neighborhood, neighborhood + topoData.nvertices + 1); graph_edged.assign(edgedest, edgedest + topoData.nedges); graph_vvals0.assign(vertexvals0, vertexvals0 + topoData.nvertices); graph_vvals1.assign(vertexvals1, vertexvals1 + topoData.nvertices); graph_evals0.assign(edgevals0, edgevals0 + topoData.nedges); graph_evals1.assign(edgevals1, edgevals1 + topoData.nedges); } virtual void TearDown() { // destroy graph if (nvgraph_handle != NULL) { status = nvgraphDestroyGraphDescr(nvgraph_handle, initial_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraph_handle = NULL; } // release library if (nvgraph_handle != NULL) { status = nvgraphDestroy(nvgraph_handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraph_handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t nvgraph_handle; nvgraphGraphDescr_t initial_graph; std::vector<int> graph_neigh; std::vector<int> graph_edged; std::vector<float> graph_vvals0; std::vector<float> graph_vvals1; std::vector<float> graph_evals0; std::vector<float> graph_evals1; }; TEST_F(NvgraphCAPITests_ContractionCSR, CSRContractionTestCreation) { nvgraphStatus_t status; nvgraphGraphDescr_t temp_graph1 = NULL;//, temp_graph2 = NULL; { status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //size_t numaggregates = 3; size_t szaggregates = 5; int aggregates[] = {0, 1, 1, 0, 2}; //exception is being dumped by GTEST after [RUN]! //so try-catch is not needed and it doesn't help with that // try{ int mult = 0; int sum = 1; status = nvgraphContractGraph(nvgraph_handle, initial_graph, temp_graph1, aggregates, szaggregates, (nvgraphSemiringOps_t)mult, (nvgraphSemiringOps_t)sum, (nvgraphSemiringOps_t)mult, (nvgraphSemiringOps_t)sum, 0);//unused } catch( const std::exception& ex ) { // dump exception: std::cerr<< "Exception:"<<ex.what()<<std::endl;//nope, but exception is being dumped by GTEST after [RUN]! //ASSERT_STREQ( "Exception:", ex.what() );//nope... } catch(...) { std::cerr<< "Exception: Unknown"<<std::endl;//nope, but exception is being dumped by GTEST after [RUN]! //ASSERT_STREQ( "Exception:", "Unknown" );//nope... 
} ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; tData.source_offsets=NULL; tData.destination_indices=NULL; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph1, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); const int nv = 3; const int ne = 7; ASSERT_EQ(tData.nvertices, nv); ASSERT_EQ(tData.nedges, ne); float getVvals0[nv]; float getVvals1[nv]; float getEvals0[ne]; float getEvals1[ne]; status = nvgraphGetVertexData(nvgraph_handle, temp_graph1, (void *)getVvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(nvgraph_handle, temp_graph1, (void *)getVvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph1, (void *)getEvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph1, (void *)getEvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphCAPITests_ContractionCSR, CSRContractionNegative) { nvgraphStatus_t status; { nvgraphGraphDescr_t temp_graph2 = NULL; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); size_t szaggregates = 3; int aggregates[] = {0, 1, 2};//this should fail because size of aggregates should match n_vertices of original graph //exception is being dumped by GTEST after [RUN]! //so try-catch is not needed and it doesn't help with that // try{ int mult = 0; int sum = 1; status = nvgraphContractGraph(nvgraph_handle, initial_graph, temp_graph2, aggregates, szaggregates, (nvgraphSemiringOps_t)mult, (nvgraphSemiringOps_t)sum, (nvgraphSemiringOps_t)mult, (nvgraphSemiringOps_t)sum, 0);//unused } catch( const std::exception& ex ) { // dump exception: std::cerr<< "Exception:"<<ex.what()<<std::endl;//nope, but exception is being dumped by GTEST after [RUN]! //ASSERT_STREQ( "Exception:", ex.what() );//nope... } catch(...) { std::cerr<< "Exception: Unknown"<<std::endl;//nope, but exception is being dumped by GTEST after [RUN]! //ASSERT_STREQ( "Exception:", "Unknown" );//nope... } ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } { nvgraphGraphDescr_t temp_graph2 = NULL; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); size_t szaggregates = 5; int aggregates[] = {0, 0, 1, 1, 3};//this should fail because not whole range [0..max(aggregates[])] is covered //exception is being dumped by GTEST after [RUN]! //so try-catch is not needed and it doesn't help with that // try{ int mult = 0; int sum = 1; status = nvgraphContractGraph(nvgraph_handle, initial_graph, temp_graph2, aggregates, szaggregates, (nvgraphSemiringOps_t)mult, (nvgraphSemiringOps_t)sum, (nvgraphSemiringOps_t)mult, (nvgraphSemiringOps_t)sum, 0);//unused } catch( const std::exception& ex ) { // dump exception: std::cerr<< "Exception:"<<ex.what()<<std::endl;//nope, but exception is being dumped by GTEST after [RUN]! //ASSERT_STREQ( "Exception:", ex.what() );//nope... } catch(...) { std::cerr<< "Exception: Unknown"<<std::endl;//nope, but exception is being dumped by GTEST after [RUN]! //ASSERT_STREQ( "Exception:", "Unknown" );//nope... 
} ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } TEST_F(NvgraphCAPITests_ContractionCSR, CSRContractionNetworkX) { nvgraphStatus_t status; try{ nvgraphGraphDescr_t netx_graph = NULL; nvgraphGraphDescr_t contracted_graph = NULL; status = nvgraphCreateGraphDescr(nvgraph_handle, &netx_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphCreateGraphDescr(nvgraph_handle, &contracted_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); std::string fname("/mnt/nvgraph_test_data/graphs/networkx/ctr_test.dat"); std::vector<int> g_row_offsets; std::vector<int> g_col_indices; std::vector<int> aggregates; std::vector<int> cg_row_offsets; std::vector<int> cg_col_indices; fill_contraction_data(fname, g_row_offsets, g_col_indices, aggregates, cg_row_offsets, cg_col_indices); //std::cout<<"********* step 1: \n"; ASSERT_EQ( g_row_offsets.empty(), false); ASSERT_EQ( g_col_indices.empty(), false); ASSERT_EQ( aggregates.empty(), false); ASSERT_EQ(cg_row_offsets.empty(), false); ASSERT_EQ(cg_col_indices.empty(), false); //std::cout<<"********* step 1.1: \n"; ASSERT_EQ( g_col_indices.size(), g_row_offsets.back() ); ASSERT_EQ( cg_col_indices.size(), cg_row_offsets.back()); //std::cout<<"********* step 1.2: \n"; nvgraphCSRTopology32I_st topoData; topoData.nvertices = g_row_offsets.size()-1;//last is nnz topoData.nedges = g_col_indices.size(); //std::cout<<"(n,m):"<<topoData.nvertices // <<", "<<topoData.nedges<<std::endl; topoData.source_offsets = &g_row_offsets[0]; topoData.destination_indices = &g_col_indices[0]; //std::cout<<"********* step 1.3: \n"; status = nvgraphSetGraphStructure(nvgraph_handle, netx_graph, (void*) &topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 2: \n"; size_t numsets = 1; std::vector<float> vdata(topoData.nvertices, 1.); void* vptr[] = {(void*) &vdata[0]}; cudaDataType_t type_v[] = {CUDA_R_32F}; std::vector<float> edata(topoData.nedges, 1.); void* eptr[] = {(void*) &edata[0]}; cudaDataType_t type_e[] = {CUDA_R_32F}; status = nvgraphAllocateVertexData(nvgraph_handle, netx_graph, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 3: \n"; status = nvgraphSetVertexData(nvgraph_handle, netx_graph, (void *)vptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 4: \n"; status = nvgraphAllocateEdgeData(nvgraph_handle, netx_graph, numsets, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 5: \n"; status = nvgraphSetEdgeData(nvgraph_handle, netx_graph, (void *)eptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 6: \n"; int mult = 0; int sum = 1; status = nvgraphContractGraph(nvgraph_handle, netx_graph, contracted_graph, &aggregates[0], aggregates.size(), (nvgraphSemiringOps_t)mult, (nvgraphSemiringOps_t)sum, (nvgraphSemiringOps_t)mult, (nvgraphSemiringOps_t)sum, 0);//unused ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 7: \n"; nvgraphCSRTopology32I_st tData; tData.source_offsets=NULL; tData.destination_indices=NULL; //1st time to get nvertices and nedges // status = nvgraphGetGraphStructure(nvgraph_handle, contracted_graph, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 8: \n"; int cgnv = cg_row_offsets.size()-1; int cgne = cg_col_indices.size(); ASSERT_EQ(tData.nvertices, cgnv); ASSERT_EQ(tData.nedges, cgne); 
//std::cout<<"********* step 9: \n"; std::vector<int> cgro(cgnv+1, 0); std::vector<int> cgci(cgne, 0); tData.source_offsets = &cgro[0]; tData.destination_indices = &cgci[0]; //2nd time to get row_offsets and column_indices // status = nvgraphGetGraphStructure(nvgraph_handle, contracted_graph, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout << "cg row_offsets:\n"; //std::copy(cgro.begin(), cgro.end(), // std::ostream_iterator<int>(std::cout,"\n")); //std::cout << "cg col_indices:\n"; //std::copy(cgci.begin(), cgci.end(), // std::ostream_iterator<int>(std::cout,"\n")); //PROBLEM: might differ due to different vertex numbering // ///ASSERT_EQ(check_diffs(cg_row_offsets, cgro), false); ///ASSERT_EQ(check_diffs(cg_col_indices, cgci), false); //this is one invariant we can check, besides vector sizes: // ASSERT_EQ( check_delta_invariant( cg_row_offsets, cgro ), true); //std::cout<<"********* step 10: \n"; status = nvgraphDestroyGraphDescr(nvgraph_handle, contracted_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(nvgraph_handle, netx_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } catch( const std::exception& ex ) { // dump exception: std::cerr<< "Exception:"<<ex.what()<<std::endl; } catch(...) { std::cerr<< "Exception: Unknown"<<std::endl; } } int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/CMakeLists.txt
cmake_minimum_required(VERSION 3.12 FATAL_ERROR) project(CUDF_TESTS LANGUAGES C CXX CUDA) ################################################################################################### # - compiler function ----------------------------------------------------------------------------- function(ConfigureTest CMAKE_TEST_NAME CMAKE_TEST_SRC) add_executable(${CMAKE_TEST_NAME} ${CMAKE_TEST_SRC}) set_target_properties(${CMAKE_TEST_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) target_link_libraries(${CMAKE_TEST_NAME} gmock gtest gmock_main gtest_main pthread nvgraph cublas cusparse curand cusolver cudart) set_target_properties(${CMAKE_TEST_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/gtests") add_test(NAME ${CMAKE_TEST_NAME} COMMAND ${CMAKE_TEST_NAME}) endfunction(ConfigureTest) ################################################################################################### # - include paths --------------------------------------------------------------------------------- include_directories( "${CMAKE_BINARY_DIR}/include" "${CMAKE_SOURCE_DIR}/include" "${CMAKE_SOURCE_DIR}/thirdparty/cnmem/include" "${CMAKE_SOURCE_DIR}/thirdparty/cub" "${CMAKE_SOURCE_DIR}/../external" "${CMAKE_SOURCE_DIR}/../external/cusp" "${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}" ) ################################################################################################### # - library paths --------------------------------------------------------------------------------- link_directories("${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}" # CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES is an undocumented/unsupported variable containing the link directories for nvcc "${CMAKE_BINARY_DIR}/lib" "${GTEST_LIBRARY_DIR}") ################################################################################################### ### test sources ################################################################################## ################################################################################################### ################################################################################################### # - nvgraph tests ------------------------------------------------------------------------------------- set(NVGRAPH_TEST_SRC "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_test.cpp" ) ConfigureTest(NVGRAPH_TEST "${NVGRAPH_TEST_SRC}") #################################################################################################### ## - triangles tests ------------------------------------------------------------------------------------- # #set(TRIANGLES_TEST_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_triangles.cpp") # #ConfigureTest(TRIANGLES_TEST "${TRIANGLES_TEST_SRC}") # #################################################################################################### ## - 2d_partitioning ------------------------------------------------------------------------------- # #set(2DPARTITIONING_TEST_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/2d_partitioning_test.cpp" # ) # #ConfigureTest(2DPARTITIONING_TEST "${2DPARTITIONING_TEST_SRC}") # #################################################################################################### ## - nvgraph_benchmark ----------------------------------------------------------------------------- # #set(NVGRAPH_BENCHMARK_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_benchmark.cpp" # "${CMAKE_CURRENT_SOURCE_DIR}/mmio.c" # ) # #ConfigureTest(NVGRAPH_BENCHMARK "${NVGRAPH_BENCHMARK_SRC}") # 
#################################################################################################### ## - nvgraph_capi_tests_2d_bfs --------------------------------------------------------------------- # #set(NVGRAPH_CAPI_TESTS_2D_BFS_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_2d_bfs.cpp" # ) # #ConfigureTest(NVGRAPH_CAPI_TESTS_2D_BFS "${NVGRAPH_CAPI_TESTS_2D_BFS_SRC}") # #################################################################################################### ## - nvgraph_capi_tests_2d_bfs_net ----------------------------------------------------------------- # #set(NVGRAPH_CAPI_TESTS_2D_BFS_NET_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_2d_bfs_net.cpp" # ) # #ConfigureTest(NVGRAPH_CAPI_TESTS_2D_BFS_NET "${NVGRAPH_CAPI_TESTS_2D_BFS_NET_SRC}") # #################################################################################################### ## - nvgraph_capi_tests_algorithms ----------------------------------------------------------------- # #set(NVGRAPH_CAPI_TESTS_ALGORITHMS_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_algorithms.cpp" # ) # #ConfigureTest(NVGRAPH_CAPI_TESTS_ALGORITHMS "${NVGRAPH_CAPI_TESTS_ALGORITHMS_SRC}") # #################################################################################################### ## - nvgraph_capi_tests_clustering ----------------------------------------------------------------- # #set(NVGRAPH_CAPI_TESTS_CLUSTERING_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_clustering.cpp" # "${CMAKE_CURRENT_SOURCE_DIR}/mmio.c" # ) # #ConfigureTest(NVGRAPH_CAPI_TESTS_CLUSTERING "${NVGRAPH_CAPI_TESTS_CLUSTERING_SRC}") # #################################################################################################### ## - nvgraph_capi_tests_contraction ---------------------------------------------------------------- #if(NOT NVGRAPH_LIGHT) #set(NVGRAPH_CAPI_TESTS_CONTRACTION_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_contraction.cpp" # ) # #ConfigureTest(NVGRAPH_CAPI_TESTS_CONTRACTION "${NVGRAPH_CAPI_TESTS_CONTRACTION_SRC}") #endif(NOT NVGRAPH_LIGHT) # #################################################################################################### ## - nvgraph_capi_test_conversion ------------------------------------------------------------------ # #set(NVGRAPH_CAPI_TEST_CONVERSION_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_conversion.cpp" # ) # #ConfigureTest(NVGRAPH_CAPI_TEST_CONVERSION "${NVGRAPH_CAPI_TEST_CONVERSION_SRC}") # #################################################################################################### ## - nvgraph_capi_tests_subgraph ------------------------------------------------------------------- # #set(NVGRAPH_CAPI_TESTS_SUBGRAPH_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_subgraph.cpp" # ) # #ConfigureTest(NVGRAPH_CAPI_TESTS_SUBGRAPH "${NVGRAPH_CAPI_TESTS_SUBGRAPH_SRC}") # #################################################################################################### ## - nvgraph_capi_tests_traversal ------------------------------------------------------------------ # #set(NVGRAPH_CAPI_TESTS_TRAVERSAL_SRC # "${CMAKE_CURRENT_SOURCE_DIR}/nvgraph_capi_tests_traversal.cpp" # ) # #ConfigureTest(NVGRAPH_CAPI_TESTS_TRAVERSAL "${NVGRAPH_CAPI_TESTS_TRAVERSAL_SRC}") ################################################################################################### ### enable testing ################################################################################ 
################################################################################################### enable_testing()
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/mmio.h
/* * Matrix Market I/O library for ANSI C * * See http://math.nist.gov/MatrixMarket for details. * * */ #ifndef MM_IO_H #define MM_IO_H #define MM_MAX_LINE_LENGTH 1025 #define MatrixMarketBanner "%%MatrixMarket" #define MM_MAX_TOKEN_LENGTH 64 typedef char MM_typecode[4]; char *mm_typecode_to_str(MM_typecode matcode); int mm_read_banner(FILE *f, MM_typecode *matcode); int mm_read_mtx_crd_size(FILE *f, int *M, int *N, int *nz); int mm_read_mtx_array_size(FILE *f, int *M, int *N); int mm_write_banner(FILE *f, MM_typecode matcode); int mm_write_mtx_crd_size(FILE *f, int M, int N, int nz); int mm_write_mtx_array_size(FILE *f, int M, int N); /********************* MM_typecode query fucntions ***************************/ #define mm_is_matrix(typecode) ((typecode)[0]=='M') #define mm_is_sparse(typecode) ((typecode)[1]=='C') #define mm_is_coordinate(typecode)((typecode)[1]=='C') #define mm_is_dense(typecode) ((typecode)[1]=='A') #define mm_is_array(typecode) ((typecode)[1]=='A') #define mm_is_complex(typecode) ((typecode)[2]=='C') #define mm_is_real(typecode) ((typecode)[2]=='R') #define mm_is_pattern(typecode) ((typecode)[2]=='P') #define mm_is_integer(typecode) ((typecode)[2]=='I') #define mm_is_symmetric(typecode)((typecode)[3]=='S') #define mm_is_general(typecode) ((typecode)[3]=='G') #define mm_is_skew(typecode) ((typecode)[3]=='K') #define mm_is_hermitian(typecode)((typecode)[3]=='H') int mm_is_valid(MM_typecode matcode); /* too complex for a macro */ /********************* MM_typecode modify fucntions ***************************/ #define mm_set_matrix(typecode) ((*typecode)[0]='M') #define mm_set_coordinate(typecode) ((*typecode)[1]='C') #define mm_set_array(typecode) ((*typecode)[1]='A') #define mm_set_dense(typecode) mm_set_array(typecode) #define mm_set_sparse(typecode) mm_set_coordinate(typecode) #define mm_set_complex(typecode)((*typecode)[2]='C') #define mm_set_real(typecode) ((*typecode)[2]='R') #define mm_set_pattern(typecode)((*typecode)[2]='P') #define mm_set_integer(typecode)((*typecode)[2]='I') #define mm_set_symmetric(typecode)((*typecode)[3]='S') #define mm_set_general(typecode)((*typecode)[3]='G') #define mm_set_skew(typecode) ((*typecode)[3]='K') #define mm_set_hermitian(typecode)((*typecode)[3]='H') #define mm_clear_typecode(typecode) ((*typecode)[0]=(*typecode)[1]= \ (*typecode)[2]=' ',(*typecode)[3]='G') #define mm_initialize_typecode(typecode) mm_clear_typecode(typecode) /********************* Matrix Market error codes ***************************/ #define MM_COULD_NOT_READ_FILE 11 #define MM_PREMATURE_EOF 12 #define MM_NOT_MTX 13 #define MM_NO_HEADER 14 #define MM_UNSUPPORTED_TYPE 15 #define MM_LINE_TOO_LONG 16 #define MM_COULD_NOT_WRITE_FILE 17 /******************** Matrix Market internal definitions ******************** MM_matrix_typecode: 4-character sequence ojbect sparse/ data storage dense type scheme string position: [0] [1] [2] [3] Matrix typecode: M(atrix) C(oord) R(eal) G(eneral) A(array) C(omplex) H(ermitian) P(attern) S(ymmetric) I(nteger) K(kew) ***********************************************************************/ #define MM_MTX_STR "matrix" #define MM_ARRAY_STR "array" #define MM_DENSE_STR "array" #define MM_COORDINATE_STR "coordinate" #define MM_SPARSE_STR "coordinate" #define MM_COMPLEX_STR "complex" #define MM_REAL_STR "real" #define MM_INT_STR "integer" #define MM_GENERAL_STR "general" #define MM_SYMM_STR "symmetric" #define MM_HERM_STR "hermitian" #define MM_SKEW_STR "skew-symmetric" #define MM_PATTERN_STR "pattern" /* high level routines */ int 
mm_write_mtx_crd(char fname[], int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode); int mm_read_mtx_crd_data(FILE *f, int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode); int mm_read_mtx_crd_entry(FILE *f, int *I, int *J, double *real, double *img, MM_typecode matcode); int mm_read_unsymmetric_sparse(const char *fname, int *M_, int *N_, int *nz_, double **val_, int **I_, int **J_); #endif
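/* -----------------------------------------------------------------------------
 * A minimal usage sketch for the declarations above. It is illustration only and
 * not part of the header: it assumes the usual mmio.c implementation in which
 * these routines return 0 on success, and the input file name "graph.mtx" is
 * hypothetical. Matrix Market coordinate entries are 1-based.
 */
#include <cstdio>
#include <cstdlib>
#include "mmio.h"

int main()
{
    std::FILE* f = std::fopen("graph.mtx", "r");   /* hypothetical input file */
    if (!f) return EXIT_FAILURE;

    MM_typecode matcode;
    if (mm_read_banner(f, &matcode) != 0) { std::fclose(f); return EXIT_FAILURE; }

    /* Keep the sketch to sparse (coordinate), non-complex matrices. */
    if (!mm_is_matrix(matcode) || !mm_is_coordinate(matcode) || mm_is_complex(matcode)) {
        std::fclose(f);
        return EXIT_FAILURE;
    }

    int M = 0, N = 0, nz = 0;
    if (mm_read_mtx_crd_size(f, &M, &N, &nz) != 0) { std::fclose(f); return EXIT_FAILURE; }
    std::printf("%d x %d matrix with %d nonzeros\n", M, N, nz);

    for (int k = 0; k < nz; ++k) {
        int i = 0, j = 0;
        double val = 0.0, imag = 0.0;
        if (mm_read_mtx_crd_entry(f, &i, &j, &val, &imag, matcode) != 0) break;
        /* i and j are 1-based row/column indices; convert to 0-based for CSR use. */
    }

    std::fclose(f);
    return EXIT_SUCCESS;
}
/* --------------------------------------------------------------------------- */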
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_algorithms.cpp
// This is gtest application that contains all of the C API tests. Parameters: // nvgraph_capi_tests [--perf] [--stress-iters N] [--gtest_filter=NameFilterPatter] // It also accepts any other gtest (1.7.0) default parameters. // Right now this application contains: // 1) Sanity Check tests - tests on simple examples with known answer (or known behaviour) // 2) Correctness checks tests - tests on real graph data, uses reference algorithm // (CPU code for SrSPMV and python scripts for other algorithms, see // python scripts here: //sw/gpgpu/nvgraph/test/ref/) with reference results, compares those two. // It also measures performance of single algorithm C API call, enf enabled (see below) // 3) Corner cases tests - tests with some bad inputs, bad parameters, expects library to handle // it gracefully // 4) Stress tests - makes sure that library result is persistent throughout the library usage // (a lot of C API calls). Also makes some assumptions and checks on memory usage during // this test. // // We can control what tests to launch by using gtest filters. For example: // Only sanity tests: // ./nvgraph_capi_tests --gtest_filter=*Sanity* // And, correspondingly: // ./nvgraph_capi_tests --gtest_filter=*Correctness* // ./nvgraph_capi_tests --gtest_filter=*Corner* // ./nvgraph_capi_tests --gtest_filter=*Stress* // Or, combination: // ./nvgraph_capi_tests --gtest_filter=*Sanity*:*Correctness* // // Performance reports are provided in the ERIS format and disabled by default. // Could be enabled by adding '--perf' to the command line. I added this parameter to vlct // // Parameter '--stress-iters N', which gives multiplier (not an absolute value) for the number of launches for stress tests // #include <utility> #include "gtest/gtest.h" #include "nvgraph_test_common.h" #include "valued_csr_graph.hxx" #include "readMatrix.hxx" #include "nvgraphP.h" #include "nvgraph.h" #include <nvgraph_experimental.h> // experimental header, contains hidden API entries, can be shared only under special circumstances without reveling internal things #include "stdlib.h" #include <algorithm> // do the perf measurements, enabled by command line parameter '--perf' static int PERF = 0; // minimum vertices in the graph to perform perf measurements #define PERF_ROWS_LIMIT 10000 // number of repeats = multiplier/num_vertices #define SRSPMV_ITER_MULTIPLIER 1000000000 #define SSSP_ITER_MULTIPLIER 30000000 #define WIDEST_ITER_MULTIPLIER 30000000 #define PAGERANK_ITER_MULTIPLIER 300000000 static std::string ref_data_prefix = ""; static std::string graph_data_prefix = ""; // iterations for stress tests = this multiplier * iterations for perf tests static int STRESS_MULTIPLIER = 1; static int simple_repeats = 50; static int complex_repeats = 20; static int print_test_timings = 1; // utility template <typename T> struct nvgraph_Const; template <> struct nvgraph_Const<double> { static const cudaDataType_t Type = CUDA_R_64F; static const double inf; static const double tol; typedef union fpint { double f; unsigned long u; } fpint_st; }; const double nvgraph_Const<double>::inf = DBL_MAX; const double nvgraph_Const<double>::tol = 1e-6; // this is what we use as a tolerance in the algorithms, more precision than this is useless for CPU reference comparison template <> struct nvgraph_Const<float> { static const cudaDataType_t Type = CUDA_R_32F; static const float inf; static const float tol; typedef union fpint { float f; unsigned u; } fpint_st; }; const float nvgraph_Const<float>::inf = FLT_MAX; const float nvgraph_Const<float>::tol 
= 1e-4; template <typename T> struct comparison { bool operator() (T* lhs, T* rhs) {return (*lhs) < (*rhs);} }; struct SR_OP { const char* get_name(nvgraphSemiring_t sr) { const char* ret = "Unknown_SR"; switch (sr) { case NVGRAPH_PLUS_TIMES_SR: ret = "PLUS_TIMES_SR"; break; case NVGRAPH_MIN_PLUS_SR: ret = "MIN_PLUS_SR"; break; case NVGRAPH_MAX_MIN_SR: ret = "MAX_MIN_SR"; break; case NVGRAPH_OR_AND_SR: ret = "OR_AND_SR"; break; } return ret; }; template <typename T> T plus(const T& a, const T& b, nvgraphSemiring_t sr) { T ret = (T)0; switch (sr) { case NVGRAPH_PLUS_TIMES_SR: ret = a + b; break; case NVGRAPH_MIN_PLUS_SR: ret = std::min(a, b); break; case NVGRAPH_MAX_MIN_SR: ret = std::max(a, b); break; case NVGRAPH_OR_AND_SR: ret = (T)((bool)(a) | (bool)(b)); break; default: printf("Semiring %d is not supported, check line %d\n", (int)sr, __LINE__); //FAIL() << "Semiring #" << (int)sr << " is not supported."; } return ret; }; template <typename T> T mul(const T& a, const T& b, nvgraphSemiring_t sr) { T ret = (T)0; switch (sr) { case NVGRAPH_PLUS_TIMES_SR: ret = a * b; break; case NVGRAPH_MIN_PLUS_SR: ret = a + b; break; case NVGRAPH_MAX_MIN_SR: ret = std::min(a, b);; break; case NVGRAPH_OR_AND_SR: ret = (T)((bool)(a) & (bool)(b)); break; default: printf("Semiring %d is not supported, check line %d\n", (int)sr, __LINE__); //FAIL() << "Semiring #" << (int)sr << " is not supported."; } return ret; }; template <typename T> T get_ini(const nvgraphSemiring_t& sr) { T ret = (T)0; switch (sr) { case NVGRAPH_PLUS_TIMES_SR: ret = (T)0; break; case NVGRAPH_MIN_PLUS_SR: ret = nvgraph_Const<T>::inf; break; case NVGRAPH_MAX_MIN_SR: ret = -(nvgraph_Const<T>::inf); break; case NVGRAPH_OR_AND_SR: ret = (T)0; break; default: printf("Semiring %d is not supported, check line %d\n", (int)sr, __LINE__); //FAIL() << "Semiring #" << (int)sr << " is not supported."; } return ret; }; } SR_OPS; template <typename T> bool enough_device_memory(int n, int nnz, size_t add) { size_t mtotal, mfree; cudaMemGetInfo(&mfree, &mtotal); if (mfree > add + sizeof(T)*3*(n + nnz)) return true; return false; } std::string convert_to_local_path(const std::string& in_file) { std::string wstr = in_file; if ((wstr != "dummy") & (wstr != "")) { std::string prefix; if (graph_data_prefix.length() > 0) { prefix = graph_data_prefix; } else { #ifdef _WIN32 //prefix = "C:\\mnt\\eris\\test\\matrices_collection\\"; prefix = "Z:\\matrices_collection\\"; std::replace(wstr.begin(), wstr.end(), '/', '\\'); #else prefix = "/mnt/nvgraph_test_data/"; #endif } wstr = prefix + wstr; } return wstr; } std::string convert_to_local_path_refdata(const std::string& in_file) { std::string wstr = in_file; if ((wstr != "dummy") & (wstr != "")) { std::string prefix; if (ref_data_prefix.length() > 0) { prefix = ref_data_prefix; } else { #ifdef _WIN32 //prefix = "C:\\mnt\\eris\\test\\ref_data\\"; prefix = "Z:\\ref_data\\"; std::replace(wstr.begin(), wstr.end(), '/', '\\'); #else prefix = "/mnt/nvgraph_test_data/ref_data/"; #endif } wstr = prefix + wstr; } return wstr; } // SrSPMV tests typedef struct SrSPMV_Usecase_t { std::string graph_file; nvgraphSemiring_t sr; double alpha; double beta; double tolerance_mul; SrSPMV_Usecase_t(const std::string& a, nvgraphSemiring_t b, const double c, const double d, double tolerance_multiplier = 1.0) : sr(b), alpha(c), beta(d), tolerance_mul(tolerance_multiplier) { graph_file = convert_to_local_path(a);}; SrSPMV_Usecase_t& operator=(const SrSPMV_Usecase_t& rhs) { graph_file = rhs.graph_file; sr = rhs.sr; alpha = rhs.alpha; beta = 
rhs.beta; return *this; }; } SrSPMV_Usecase; typedef struct SSSP_Usecase_t { std::string graph_file; int source_vert; std::string result_file; double tolerance_mul; SSSP_Usecase_t(const std::string& a, int b, const std::string& c, double tolerance_multiplier = 1.0) : source_vert(b), tolerance_mul(tolerance_multiplier) { graph_file = convert_to_local_path(a); result_file = convert_to_local_path_refdata(c);}; SSSP_Usecase_t& operator=(const SSSP_Usecase_t& rhs) { graph_file = rhs.graph_file; source_vert = rhs.source_vert; result_file = rhs.result_file; return *this; } } SSSP_Usecase; typedef struct WidestPath_Usecase_t { std::string graph_file; int source_vert; std::string result_file; double tolerance_mul; WidestPath_Usecase_t(const std::string& a, int b, const std::string& c, double tolerance_multiplier = 1.0) : source_vert(b), tolerance_mul(tolerance_multiplier) { graph_file = convert_to_local_path(a); result_file = convert_to_local_path_refdata(c);}; WidestPath_Usecase_t& operator=(const WidestPath_Usecase_t& rhs) { graph_file = rhs.graph_file; source_vert = rhs.source_vert; result_file = rhs.result_file; return *this; } } WidestPath_Usecase; typedef struct Pagerank_Usecase_t { std::string graph_file; float alpha; std::string result_file; double tolerance_mul; Pagerank_Usecase_t(const std::string& a, float b, const std::string& c, double tolerance_multiplier = 1.0) : alpha(b), tolerance_mul(tolerance_multiplier) { graph_file = convert_to_local_path(a); result_file = convert_to_local_path_refdata(c);}; Pagerank_Usecase_t& operator=(const Pagerank_Usecase_t& rhs) { graph_file = rhs.graph_file; alpha = rhs.alpha; result_file = rhs.result_file; return *this; } } Pagerank_Usecase; class NVGraphCAPITests_SrSPMV : public ::testing::TestWithParam<SrSPMV_Usecase> { public: NVGraphCAPITests_SrSPMV() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { //const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); //printf("We are in test %s of test case %s.\n", test_info->name(), test_info->test_case_name()); if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const SrSPMV_Usecase& param) { double test_start, test_end, read_start, read_end; test_start = second(); const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << "_alpha_" << (int)param.alpha << "_beta_" << (int)param.beta; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + ss.str(); nvgraphTopologyType_t topo = NVGRAPH_CSR_32; int weight_index = 0; int x_index = 0; int y_index = 1; nvgraphStatus_t status; read_start = second(); FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); ASSERT_EQ(read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, 
read_val), 0); fclose(fpin); read_end = second(); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size())) || (PERF && (n < PERF_ROWS_LIMIT || param.alpha + param.beta < 2))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data //@TODO: random fill? std::vector<T> calculated_res(n); std::vector<T> data1(n), data2(n); for (int i = 0; i < n; i++) { data1[i] = (T)(1.0*rand()/RAND_MAX - 0.5); data2[i] = (T)(1.0*rand()/RAND_MAX - 0.5); //printf ("data1[%d]==%f, data2[%d]==%f\n", i, data1[i], i, data2[i]); } void* vertexptr[2] = {(void*)&data1[0], (void*)&data2[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 2, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[0], x_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[1], y_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], weight_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T alphaT = (T)param.alpha; T betaT = (T)param.beta; // run if (PERF) { double start, stop; // warmup status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alphaT, x_index, (void*)&betaT, y_index, param.sr); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDeviceSynchronize(); int repeat = simple_repeats; start = second(); start = second(); // perf loop for (int i = 0; i < repeat; i++) { status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alphaT, x_index, (void*)&betaT, y_index, param.sr); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } cudaDeviceSynchronize(); stop = second(); printf("&&&& PERF Time_%s_%s %10.8f -ms\n", test_id.c_str(), SR_OPS.get_name(param.sr), 1000.0*(stop-start)/((double)repeat)); } // reinit data status = nvgraphSetVertexData(handle, g1, (void*)&data2[0], y_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alphaT, x_index, (void*)&betaT, y_index, param.sr); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // get result status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], y_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // check correctness std::vector<T> expected_res(n, SR_OPS.get_ini<T>(param.sr)); for (int row = 0; row < n; row++) { for (int nz = read_row_ptr[row]; nz < read_row_ptr[row+1]; nz++) { expected_res[row] = SR_OPS.plus<T>(expected_res[row], SR_OPS.mul<T>(SR_OPS.mul<T>(param.alpha, read_val[nz], param.sr), data1[read_col_ind[nz]], param.sr), param.sr); } expected_res[row] = SR_OPS.plus<T>(expected_res[row], SR_OPS.mul<T>(data2[row], param.beta, param.sr), param.sr); double reference_res = (double)expected_res[row]; double nvgraph_res = (double)calculated_res[row]; ASSERT_NEAR(reference_res, nvgraph_res, nvgraph_Const<T>::tol) << "In row #" << row << " graph " << param.graph_file << " semiring " << SR_OPS.get_name(param.sr) << 
" alpha=" << param.alpha << " beta=" << param.beta << "\n"; } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); test_end = second(); if (print_test_timings) printf("Test took: %f seconds from which %f seconds were spent on data reading\n", test_end - test_start, read_end - read_start); } }; TEST_P(NVGraphCAPITests_SrSPMV, CheckResultDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_SrSPMV, CheckResultFloat) { run_current_test<float>(GetParam()); } /// WidestPath tests class NVGraphCAPITests_WidestPath : public ::testing::TestWithParam<WidestPath_Usecase> { public: NVGraphCAPITests_WidestPath() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const WidestPath_Usecase& param) { double test_start, test_end, read_start, read_end; test_start = second(); const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.source_vert; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + std::string("_") + ss.str().c_str(); nvgraphTopologyType_t topo = NVGRAPH_CSC_32; nvgraphStatus_t status; read_start = second(); FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); ASSERT_EQ(read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val), 0); fclose(fpin); read_end = second(); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size())) || (PERF && n < PERF_ROWS_LIMIT)) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." 
<< test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data size_t numsets = 1; std::vector<T> calculated_res(n); //void* vertexptr[1] = {(void*)&calculated_res[0]}; cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //status = nvgraphSetVertexData(handle, g1, vertexptr[0], 0 ); //ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, numsets, type_e ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int weight_index = 0; int source_vert = param.source_vert; int widest_path_index = 0; status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index); cudaDeviceSynchronize(); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // run if (PERF) { double start, stop; start = second(); start = second(); int repeat = simple_repeats; for (int i = 0; i < repeat; i++) { status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } cudaDeviceSynchronize(); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0*(stop-start)/repeat); } // get result status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // check correctness if (param.result_file.length()>0) { fpin = fopen(param.result_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << " Cannot read file with reference data: " << param.result_file << std::endl; std::vector<T> expected_res(n); ASSERT_EQ(read_binary_vector(fpin, n, expected_res), 0); fclose(fpin); for (int i = 0; i < n; i++) { ASSERT_NEAR(expected_res[i], calculated_res[i], nvgraph_Const<T>::tol) << "In row #" << i << " graph " << param.graph_file << " source_vert=" << source_vert<< "\n" ; } } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); test_end = second(); if (print_test_timings) printf("Test took: %f seconds from which %f seconds were spent on data reading\n", test_end - test_start, read_end - read_start); } }; TEST_P(NVGraphCAPITests_WidestPath, CheckResultDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_WidestPath, CheckResultFloat) { run_current_test<float>(GetParam()); } //// SSSP tests class NVGraphCAPITests_SSSP : public ::testing::TestWithParam<SSSP_Usecase> { public: NVGraphCAPITests_SSSP() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const SSSP_Usecase& param) { double test_start, test_end, read_start, read_end; test_start = second(); const ::testing::TestInfo* const test_info 
=::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.source_vert; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + std::string("_") + ss.str().c_str(); nvgraphTopologyType_t topo = NVGRAPH_CSC_32; nvgraphStatus_t status; read_start = second(); FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); ASSERT_EQ(read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val), 0); fclose(fpin); read_end = second(); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size())) || (PERF && n < PERF_ROWS_LIMIT)) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data size_t numsets = 1; std::vector<T> calculated_res(n); //void* vertexptr[1] = {(void*)&calculated_res[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //status = nvgraphSetVertexData(handle, descrG, vertexptr[0], 0 ); //ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, numsets, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int weight_index = 0; int source_vert = param.source_vert; int sssp_index = 0; // run status = nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index); cudaDeviceSynchronize(); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); if (PERF) { double start, stop; start = second(); start = second(); int repeat = simple_repeats; for (int i = 0; i < repeat; i++) { status = nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } cudaDeviceSynchronize(); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0*(stop-start)/repeat); } // get result status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // check with reference if (param.result_file.length() > 0) { fpin = fopen(param.result_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << " Cannot read file with reference data: " << param.result_file << std::endl; std::vector<T> expected_res(n); ASSERT_EQ(read_binary_vector(fpin, n, expected_res), 0); fclose(fpin); for (int i = 0; i < n; i++) { ASSERT_NEAR(expected_res[i], calculated_res[i], nvgraph_Const<T>::tol) << "In row #" << i << " graph " << param.graph_file << " source_vert=" << source_vert<< "\n" ; } } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); test_end = second(); if (print_test_timings) 
printf("Test took: %f seconds from which %f seconds were spent on data reading\n", test_end - test_start, read_end - read_start); } }; TEST_P(NVGraphCAPITests_SSSP, CheckResultDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_SSSP, CheckResultFloat) { run_current_test<float>(GetParam()); } class NVGraphCAPITests_Pagerank : public ::testing::TestWithParam<Pagerank_Usecase> { public: NVGraphCAPITests_Pagerank() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const Pagerank_Usecase& param) { double test_start, test_end, read_start, read_end; test_start = second(); const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.alpha; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + std::string("_") + ss.str().c_str(); if (param.graph_file == "dummy") { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } // Waive hugebubbles test, http://nvbugs/200189611 /*{ cudaDeviceProp prop; cudaGetDeviceProperties ( &prop, 0 ); std::string gpu(prop.name); if (param.graph_file.find("hugebubbles-00020") != std::string::npos && (gpu.find("M40") != npos || gpu.find("GTX 980 Ti") != npos || gpu.find("GTX TITAN X") != npos || gpu.find("M6000") != npos || gpu.find("GTX 680") != npos) ) std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; }*/ nvgraphTopologyType_t topo = NVGRAPH_CSC_32; nvgraphStatus_t status; read_start = second(); FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); std::vector<T> dangling(n); ASSERT_EQ(read_data_amgx_csr_bin_rhs (fpin, n, nnz, read_row_ptr, read_col_ind, read_val, dangling), 0); fclose(fpin); read_end = second(); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size())) || (PERF && n < PERF_ROWS_LIMIT)) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." 
<< test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data std::vector<T> calculated_res(n, (T)1.0/n); void* vertexptr[2] = {(void*)&dangling[0], (void*)&calculated_res[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 2, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[1], 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int bookmark_index = 0; int weight_index = 0; T alpha = param.alpha; int pagerank_index = 1; int has_guess = 0; float tolerance = (sizeof(T) > 4 ? 1e-8f : 1e-6f) * param.tolerance_mul; int max_iter = 1000; status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); cudaDeviceSynchronize(); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // run if (PERF) { double start, stop; start = second(); start = second(); int repeat = complex_repeats; for (int i = 0; i < repeat; i++) { status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } cudaDeviceSynchronize(); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0*(stop-start)/repeat); } // get result status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], pagerank_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); std::sort(calculated_res.begin(), calculated_res.end()); // check with reference if (param.result_file.length()>0) { fpin = fopen(param.result_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << " Cannot read file with reference data: " << param.result_file << std::endl; std::vector<T> expected_res(n); ASSERT_EQ(read_binary_vector(fpin, n, expected_res), 0); fclose(fpin); T tot_err = 0.0, err; int n_err = 0; for (int i = 0; i < n; i++) { err = fabs(expected_res[i] - calculated_res[i]); if (err> nvgraph_Const<T>::tol) { tot_err+=err; n_err++; } } if (n_err) { EXPECT_NEAR(tot_err/n_err, nvgraph_Const<T>::tol, nvgraph_Const<T>::tol*9.99); // Network x used n*1e-10 for precision ASSERT_LE(n_err, 0.001*n); // we tolerate 0.1% of values with a litte difference //printf("number of incorrect entries: %d\n", n_err); } } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); test_end = second(); if (print_test_timings) printf("Test took: %f seconds from which %f seconds were spent on data reading\n", test_end - test_start, read_end - read_start); } }; TEST_P(NVGraphCAPITests_Pagerank, CheckResultDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_Pagerank, CheckResultFloat) { run_current_test<float>(GetParam()); } class NVGraphCAPITests_KrylovPagerank : public ::testing::TestWithParam<Pagerank_Usecase> { public: 
NVGraphCAPITests_KrylovPagerank() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const Pagerank_Usecase& param) { const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.alpha; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + std::string("_") + ss.str().c_str(); if (param.graph_file == "dummy") { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphTopologyType_t topo = NVGRAPH_CSC_32; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); std::vector<T> dangling(n); ASSERT_EQ(read_data_amgx_csr_bin_rhs (fpin, n, nnz, read_row_ptr, read_col_ind, read_val, dangling), 0); fclose(fpin); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data std::vector<T> calculated_res(n, (T)1.0/n); void* vertexptr[2] = {(void*)&dangling[0], (void*)&calculated_res[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 2, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[1], 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int bookmark_index = 0; int weight_index = 0; T alpha = param.alpha; int pagerank_index = 1; int has_guess = 0; float tolerance = (sizeof(T) > 4 ? 
1e-8f : 1e-6f) * param.tolerance_mul; int max_iter = 150; int ss_sz = 7; // run if (PERF && n > PERF_ROWS_LIMIT) { double start, stop; start = second(); start = second(); int repeat = 10; for (int i = 0; i < repeat; i++) status = nvgraphKrylovPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, tolerance, max_iter, ss_sz, has_guess, pagerank_index); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0*(stop-start)/repeat); } else status = nvgraphKrylovPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, tolerance, max_iter, ss_sz, has_guess, pagerank_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // get result status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], pagerank_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); std::sort(calculated_res.begin(), calculated_res.end()); // check with reference if (param.result_file.length()>0) { fpin = fopen(param.result_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << " Cannot read file with reference data: " << param.result_file << std::endl; std::vector<T> expected_res(n); ASSERT_EQ(read_binary_vector(fpin, n, expected_res), 0); fclose(fpin); T tot_err = 0.0, err; int n_err = 0; for (int i = 0; i < n; i++) { err = fabs(expected_res[i] - calculated_res[i]); if (err> nvgraph_Const<T>::tol) { tot_err+=err; n_err++; } } if (n_err) { EXPECT_NEAR(tot_err/n_err, nvgraph_Const<T>::tol, nvgraph_Const<T>::tol*9.99); // Network x used n*1e-10 for precision ASSERT_LE(n_err, 0.001*n); // we tolerate 0.1% of values with a litte difference //printf("number of incorrect entries: %d\n", n_err); } } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_KrylovPagerank, CheckResultDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_KrylovPagerank, CheckResultFloat) { run_current_test<float>(GetParam()); } /// Few sanity checks. 
class NVGraphCAPITests_SrSPMV_Sanity : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_SrSPMV_Sanity() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSR_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename T> void prepare_and_run(const nvgraphCSRTopology32I_st& topo_st, T* edgedata, T* data1, T* data2, T alpha, T beta, T* expected ) { g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph n = topo_st.nvertices; nnz = topo_st.nedges; status = nvgraphSetGraphStructure(handle, g1, (void*)&topo_st, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 2, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); void* vertexptr[2] = {(void*)data1, (void*)data2}; void* edgeptr[1] = {(void*)edgedata}; int weight_index = 0; int x_index = 0; int y_index = 1; status = nvgraphSetVertexData(handle, g1, vertexptr[0], x_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[1], y_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, edgeptr[0], weight_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alpha, x_index, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // get result std::vector<T> calculated_res(n); status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], y_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { double reference_res = (double)expected[row]; double nvgraph_res = (double)calculated_res[row]; ASSERT_NEAR(reference_res, nvgraph_res, nvgraph_Const<T>::tol) << "row=" << row << " alpha=" << alpha << " beta=" << beta << "\n"; } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // Trivial matrix with trivial answers, checks plus_times sr only (but that is good enough) and some set of alfa and beta template <typename T> void run_simple_test() { n = 1024; nnz = 1024; std::vector<int> offsets(n+1), neighborhood(nnz); std::vector<T> data1(n), data2(n); for (int i = 0; i < n; i++) { data1[i] = (T)(1.0*rand()/RAND_MAX - 0.5); data2[i] = (T)(1.0*rand()/RAND_MAX - 0.5); offsets[i] = neighborhood[i] = i; } offsets[n] = n; std::vector<T> edge_data(nnz, (T)(-2.0)); std::vector<T> expected_res(n, SR_OPS.get_ini<T>(NVGRAPH_PLUS_TIMES_SR)); nvgraphCSRTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; T pa[] = {-1.0, 0.0, 0.5, 1.0}; T pb[] = {-1.0, 0.0, 0.5, 1.0}; for (int ia = 0; ia < sizeof(pa)/sizeof(T); ia++) for (int ib = 0; ib < sizeof(pb)/sizeof(T); ib++) { for (int i = 0; i < n; i++) { expected_res[i] = SR_OPS.get_ini<T>(NVGRAPH_PLUS_TIMES_SR); } for (int i = 0; i < n; i++) { T tv1 = SR_OPS.mul<T>(data1[i], edge_data[i], NVGRAPH_PLUS_TIMES_SR); tv1 = 
SR_OPS.mul<T>(tv1, pa[ia], NVGRAPH_PLUS_TIMES_SR); T tv2 = SR_OPS.mul<T>(data2[i], pb[ib], NVGRAPH_PLUS_TIMES_SR); tv2 = SR_OPS.plus<T>(tv1, tv2, NVGRAPH_PLUS_TIMES_SR); expected_res[i] = SR_OPS.plus<T>(expected_res[i], tv2, NVGRAPH_PLUS_TIMES_SR); } prepare_and_run<T>(topology, &edge_data[0], &data1[0], &data2[0], pa[ia], pb[ib], &expected_res[0]); } } }; TEST_F(NVGraphCAPITests_SrSPMV_Sanity, SanityDouble) { run_simple_test<double>(); } TEST_F(NVGraphCAPITests_SrSPMV_Sanity, SanityFloat) { run_simple_test<float>(); } class NVGraphCAPITests_SSSP_Sanity : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_SSSP_Sanity() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSC_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename T> void prepare_and_run(const nvgraphCSCTopology32I_st& topo_st, T* edgedata, T* expected ) { g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph n = topo_st.nvertices; nnz = topo_st.nedges; status = nvgraphSetGraphStructure(handle, g1, (void*)&topo_st, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); void* edgeptr[1] = {(void*)edgedata}; status = nvgraphSetEdgeData(handle, g1, edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int source_vert = 0; int sssp_index = 0; int weight_index = 0; status = nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status) << ", n=" << n << std::endl; // get result std::vector<T> calculated_res(n); status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { double reference_res = (double)expected[row]; double nvgraph_res = (double)calculated_res[row]; ASSERT_NEAR(reference_res, nvgraph_res, nvgraph_Const<T>::tol) << "row=" << row << ", n=" << n << std::endl; } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // cycle graph, all weights = 1, shortest path = vertex number template <typename T> void run_cycle_test() { n = 1050; nnz = n; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i] = i; neighborhood[i] = (n - 1 + i) % n; } offsets[n] = n; std::vector<T> edge_data(nnz, (T)1.0); std::vector<T> expected_res(n, nvgraph_Const<T>::inf); for (int i = 0; i < n; i++) { expected_res[i] = i; } // extensive run for small N's for (int i = 3; i < 200; i++) { neighborhood[0] = i - 1; nvgraphCSCTopology32I_st topology = {i, i, &offsets[0], &neighborhood[0]}; prepare_and_run<T>(topology, &edge_data[0], &expected_res[0]); } // also trying larger N's for (int i = 1020; i < 1030; i++) { neighborhood[0] = i - 1; nvgraphCSCTopology32I_st topology = {i, i, &offsets[0], &neighborhood[0]}; prepare_and_run<T>(topology, &edge_data[0], 
&expected_res[0]); } } // full binary tree, all weights = 1, shortest path length = level of the node template <typename T> void run_tree_test() { int k = 3; n = (1 << k) - 1; nnz = (1 << k) - 2; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i+1] = i; } offsets[0] = 0; for (int i = 0; i < nnz; i++) { neighborhood[i] = i / 2; } std::vector<T> edge_data(nnz, (T)1.0); std::vector<T> expected_res(n, nvgraph_Const<T>::inf); expected_res[0] = 0; for (int i = 1; i < k; i++) { for (int v = 0; v < (1 << i); v++) expected_res[(1 << i) - 1 + v] = i; } nvgraphCSCTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; prepare_and_run<T>(topology, &edge_data[0], &expected_res[0]); } }; TEST_F(NVGraphCAPITests_SSSP_Sanity, SanityCycleDouble) { run_cycle_test<double>(); } TEST_F(NVGraphCAPITests_SSSP_Sanity, SanityCycleFloat) { run_cycle_test<float>(); } TEST_F(NVGraphCAPITests_SSSP_Sanity, SanityTreeDouble) { run_tree_test<double>(); } TEST_F(NVGraphCAPITests_SSSP_Sanity, SanityTreeFloat) { run_tree_test<float>(); } class NVGraphCAPITests_WidestPath_Sanity : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_WidestPath_Sanity() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSC_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename T> void prepare_and_run(const nvgraphCSCTopology32I_st& topo_st, T* edgedata, T* expected ) { g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph n = topo_st.nvertices; nnz = topo_st.nedges; status = nvgraphSetGraphStructure(handle, g1, (void*)&topo_st, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); void* edgeptr[1] = {(void*)edgedata}; status = nvgraphSetEdgeData(handle, g1, edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int source_vert = 0; int widest_path_index = 0; int weight_index = 0; status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // get result std::vector<T> calculated_res(n); status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { double reference_res = (double)expected[row]; double nvgraph_res = (double)calculated_res[row]; ASSERT_NEAR(reference_res, nvgraph_res, nvgraph_Const<T>::tol); } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // cycle graph, weigths are from n-1 to 0 starting with vertex = 0. 
widest path = [inf, n-1, n-2, ..., 1] template <typename T> void run_cycle_test() { n = 1024; nnz = n; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i] = i; neighborhood[i] = (n - 1 + i) % n; } offsets[n] = n; std::vector<T> edge_data(nnz, 0); std::vector<T> expected_res(n, nvgraph_Const<T>::inf); for (int i = 1; i < n; i++) { edge_data[i] = (T)(n - i); } for (int i = 1; i < n; i++) { expected_res[i] = (T)(n - i); } nvgraphCSCTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; prepare_and_run<T>(topology, &edge_data[0], &expected_res[0]); } // cycle graph, edge weigths are equal to the (max_tree_lvl - edge_lvl). widest path to vertex is = (max_lvl - vertex_lvl) template <typename T> void run_tree_test() { int k = 10; n = (1 << k) - 1; nnz = (1 << k) - 2; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i+1] = i; } offsets[0] = 0; for (int i = 0; i < nnz; i++) { neighborhood[i] = i / 2; } // fill edge data and expected res accordingly std::vector<T> edge_data(nnz); std::vector<T> expected_res(n, nvgraph_Const<T>::inf); for (int i = 1; i < k; i++) { for (int v = 0; v < (1 << i); v++) { edge_data[(1 << i) - 2 + v] = (k - i); expected_res[(1 << i) - 1 + v] = (k - i); } } nvgraphCSCTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; prepare_and_run<T>(topology, &edge_data[0], &expected_res[0]); } }; TEST_F(NVGraphCAPITests_WidestPath_Sanity, SanityCycleDouble) { run_cycle_test<double>(); } TEST_F(NVGraphCAPITests_WidestPath_Sanity, SanityCycleFloat) { run_cycle_test<float>(); } TEST_F(NVGraphCAPITests_WidestPath_Sanity, SanityTreeDouble) { run_tree_test<double>(); } TEST_F(NVGraphCAPITests_WidestPath_Sanity, SanityTreeFloat) { run_tree_test<float>(); } class NVGraphCAPITests_Pagerank_Sanity : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_Pagerank_Sanity() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSC_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename T> void prepare_and_run(const nvgraphCSCTopology32I_st& topo_st, T* bookmark, T* edge_data ) { g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph n = topo_st.nvertices; nnz = topo_st.nedges; status = nvgraphSetGraphStructure(handle, g1, (void*)&topo_st, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 2, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int bookmark_index = 0; int weight_index = 0; T alpha = 0.85; int pagerank_index = 1; int has_guess = 0; float tolerance = 1e-6;//sizeof(T) > 4 ? 
1e-8f : 1e-6f; int max_iter = 1000; status = nvgraphSetVertexData(handle, g1, (void*)bookmark, bookmark_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); void* edgeptr[1] = {(void*)edge_data}; status = nvgraphSetEdgeData(handle, g1, edgeptr[0], weight_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // run status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // get result std::vector<T> calculated_res(n); status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], pagerank_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 1; row < n; row++) { //printf("PR[%d] == %10.7g, PR[%d] == %10.7g\n", row-1, calculated_res[row-1], row, calculated_res[row]); double res1 = (double)calculated_res[row-1]; double res2 = (double)calculated_res[row]; ASSERT_LE(res1, res2) << "In row: " << row << "\n"; } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // path graph, weigths are = 1, last node is dangling, pagerank should be in ascending order template <typename T> void run_path_test() { n = 1024; nnz = n - 1; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[1+i] = i; neighborhood[i] = i; } offsets[0] = 0; std::vector<T> edge_data(nnz, 1); std::vector<T> dangling(n, 0); dangling[n-1] = (T)(1); nvgraphCSCTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; prepare_and_run<T>(topology, &dangling[0], &edge_data[0]); } }; TEST_F(NVGraphCAPITests_Pagerank_Sanity, SanityPathDouble) { run_path_test<double>(); } TEST_F(NVGraphCAPITests_Pagerank_Sanity, SanitypathFloat) { run_path_test<float>(); } /// Corner cases for the C API class NVGraphCAPITests_SrSPMV_CornerCases : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_SrSPMV_CornerCases() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSR_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } // Trivial matrix with trivial answers, checks plus_times sr only (but that is good enough) and sets of alfa and beta from {0.0, 1.0} template <typename T> void run_simple_test() { n = 1024; nnz = 1024; std::vector<int> offsets(n+1), neighborhood(nnz); std::vector<T> data1(n), data2(n); for (int i = 0; i < n; i++) { data1[i] = (T)(1.0*rand()/RAND_MAX - 0.5); data2[i] = (T)(1.0*rand()/RAND_MAX - 0.5); offsets[i] = neighborhood[i] = i; } offsets[n] = n; std::vector<T> edge_data(nnz, (T)1.0); nvgraphCSRTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; T alpha = (T)(1.0); T beta = (T)(1.0); int weight_index = 0; int x_index = 0; int y_index = 1; g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph n = topology.nvertices; nnz = topology.nedges; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; // not multivalued CSR status = nvgraphSrSpmv(handle, g1, weight_index, 
(void*)&alpha, x_index, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphAllocateVertexData(handle, g1, 2, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); void* vertexptr[2] = {(void*)&data1[0], (void*)&data2[0]}; void* edgeptr[1] = {(void*)(&edge_data[0])}; status = nvgraphSetVertexData(handle, g1, vertexptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[1], 1 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // different bad values status = nvgraphSrSpmv(NULL, g1, weight_index, (void*)&alpha, x_index, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSrSpmv(handle, NULL, weight_index, (void*)&alpha, x_index, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSrSpmv(handle, g1, 10, (void*)&alpha, x_index, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alpha, 10, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alpha, x_index, (void*)&beta, 10, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alpha, x_index, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // only CSR is supported { status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSC_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 2, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alpha, x_index, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // only 32F and 64F real are supported // but we cannot check SrSPMV for that because AllocateData will throw an error first /*for (int i = 0; i < 10; i++) { if (i == CUDA_R_32F || i == CUDA_R_64F) continue; cudaDataType_t t_type_v[2] = {(cudaDataType_t)i, (cudaDataType_t)i}; cudaDataType_t t_type_e[1] = {(cudaDataType_t)i}; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 2, t_type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, t_type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alpha, x_index, (void*)&beta, y_index, NVGRAPH_PLUS_TIMES_SR); ASSERT_EQ(NVGRAPH_STATUS_TYPE_NOT_SUPPORTED, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } */ } 
}; TEST_F(NVGraphCAPITests_SrSPMV_CornerCases, CornerCasesDouble) { run_simple_test<double>(); } TEST_F(NVGraphCAPITests_SrSPMV_CornerCases, CornerCasesFloat) { run_simple_test<float>(); } class NVGraphCAPITests_SSSP_CornerCases : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_SSSP_CornerCases() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSC_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename T> void run_cycle_test() { n = 1024; nnz = n; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i] = i; neighborhood[i] = (n - 1 + i) % n; } offsets[n] = n; std::vector<T> edge_data(nnz, (T)1.0); nvgraphCSCTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; int source_vert = 0; int sssp_index = 0; int weight_index = 0; g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // only multivaluedCSR are supported status = nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); void* edgeptr[1] = {(void*)&edge_data[0]}; status = nvgraphSetEdgeData(handle, g1, edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSssp(NULL, g1, weight_index, &source_vert, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSssp(handle, NULL, weight_index, &source_vert, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSssp(handle, g1, 500, &source_vert, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSssp(handle, g1, weight_index, NULL, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSssp(handle, g1, weight_index, &source_vert, 500); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // only CSC is supported { status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // only 32F and 64F real are supported // but we cannot check 
SSSP for that because AllocateData will throw an error first /*for (int i = 0; i < 10; i++) { if (i == CUDA_R_32F || i == CUDA_R_64F) continue; cudaDataType_t t_type_v[2] = {(cudaDataType_t)i, (cudaDataType_t)i}; cudaDataType_t t_type_e[1] = {(cudaDataType_t)i}; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSC_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 1, t_type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, t_type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_TYPE_NOT_SUPPORTED, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } */ } }; TEST_F(NVGraphCAPITests_SSSP_CornerCases, CornerCasesDouble) { run_cycle_test<double>(); } TEST_F(NVGraphCAPITests_SSSP_CornerCases, CornerCasesFloat) { run_cycle_test<float>(); } class NVGraphCAPITests_WidestPath_CornerCases : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_WidestPath_CornerCases() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSC_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename T> void run_test() { n = 1024; nnz = n; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i] = i; neighborhood[i] = (n - 1 + i) % n; } offsets[n] = n; std::vector<T> edge_data(nnz, (T)1.0); std::vector<T> expected_res(n, nvgraph_Const<T>::inf); for (int i = 0; i < n; i++) { expected_res[i] = i; } nvgraphCSCTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int source_vert = 0; int widest_path_index = 0; int weight_index = 0; status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); void* edgeptr[1] = {(void*)&edge_data[0]}; status = nvgraphSetEdgeData(handle, g1, edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphWidestPath(NULL, g1, weight_index, &source_vert, widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphWidestPath(handle, NULL, weight_index, &source_vert, widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphWidestPath(handle, g1, 500, &source_vert, widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphWidestPath(handle, g1, weight_index, NULL, widest_path_index); 
ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, 500); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // only CSC is supported { status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // only 32F and 64F real are supported // but we cannot check WidestPath for that because AllocateData will throw an error first /*for (int i = 0; i < 10; i++) { if (i == CUDA_R_32F || i == CUDA_R_64F) continue; cudaDataType_t t_type_v[2] = {(cudaDataType_t)i, (cudaDataType_t)i}; cudaDataType_t t_type_e[1] = {(cudaDataType_t)i}; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSC_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 1, t_type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, t_type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_TYPE_NOT_SUPPORTED, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } */ } }; TEST_F(NVGraphCAPITests_WidestPath_CornerCases, CornerCasesDouble) { run_test<double>(); } TEST_F(NVGraphCAPITests_WidestPath_CornerCases, CornerCasesFloat) { run_test<float>(); } class NVGraphCAPITests_Pagerank_CornerCases : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_Pagerank_CornerCases() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSC_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename T> void run_test() { n = 1024; nnz = n - 1; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[1+i] = i; neighborhood[i] = i; } offsets[0] = 0; std::vector<T> edge_data(nnz, 1.0); std::vector<T> dangling(n, 0); dangling[n-1] = (T)(1); nvgraphCSCTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph n = topology.nvertices; nnz = topology.nedges; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[2] = 
{nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; int bookmark_index = 0; int weight_index = 0; T alpha = 0.85; T alpha_bad = -10.0; int pagerank_index = 1; int has_guess = 0; float tolerance = 1e-6;//sizeof(T) > 4 ? 1e-8f : 1e-6f; int max_iter = 1000; // should be multivalued status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 2, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, (void*)&dangling[0], bookmark_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void*)&edge_data[0], weight_index ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // different invalid values status = nvgraphPagerank(NULL, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, NULL, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, 500, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, weight_index, NULL, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha_bad, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, 500, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, 500, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, 500, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, -10.0f, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, 10.0f, max_iter); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); { status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 2, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); 
ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // only 32F and 64F real are supported // but we cannot check Pagerank for that because AllocateData will throw an error first /*for (int i = 0; i < 10; i++) { if (i == CUDA_R_32F || i == CUDA_R_64F) continue; cudaDataType_t t_type_v[2] = {(cudaDataType_t)i, (cudaDataType_t)i}; cudaDataType_t t_type_e[1] = {(cudaDataType_t)i}; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSC_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 2, t_type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, t_type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_TYPE_NOT_SUPPORTED, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } */ } }; TEST_F(NVGraphCAPITests_Pagerank_CornerCases, CornerCasesDouble) { run_test<double>(); } TEST_F(NVGraphCAPITests_Pagerank_CornerCases, CornerCasesFloat) { run_test<float>(); } class NVGraphCAPITests_SrSPMV_Stress : public ::testing::TestWithParam<SrSPMV_Usecase> { public: NVGraphCAPITests_SrSPMV_Stress() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { //const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); //printf("We are in test %s of test case %s.\n", test_info->name(), test_info->test_case_name()); if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const SrSPMV_Usecase& param) { nvgraphTopologyType_t topo = NVGRAPH_CSR_32; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); ASSERT_EQ(read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val), 0); fclose(fpin); const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data //@TODO: random fill? 
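// The x (data1) and y (data2) vertex vectors below are filled with
// pseudo-random values in [-0.5, 0.5] via plain rand(), so the repeated
// SrSpmv calls operate on non-trivial input.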
std::vector<T> calculated_res(n); std::vector<T> data1(n), data2(n); for (int i = 0; i < n; i++) { data1[i] = (T)(1.0*rand()/RAND_MAX - 0.5); data2[i] = (T)(1.0*rand()/RAND_MAX - 0.5); } void* vertexptr[2] = {(void*)&data1[0], (void*)&data2[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 2, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[1], 1 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int weight_index = 0; int x_index = 0; int y_index = 1; // reinit data status = nvgraphSetVertexData(handle, g1, (void*)&data2[0], y_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T alphaT = (T)param.alpha; T betaT = (T)param.beta; // run int repeat = std::max((int)(((float)(SRSPMV_ITER_MULTIPLIER)*STRESS_MULTIPLIER)/n), 1); //printf ("Repeating C API call for %d times\n", repeat); std::vector<T> calculated_res1(n), calculated_res_mid(n); size_t free_mid = 0, free_last = 0, total = 0; for (int i = 0; i < repeat; i++) { // cudaMemGetInfo(&t, &total); // printf("Iteration: %d, freemem: %zu\n", i, t); status = nvgraphSrSpmv(handle, g1, weight_index, (void*)&alphaT, x_index, (void*)&betaT, y_index, param.sr); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // all of those should be equal if (i == 0) { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res1[0], y_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } else { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res_mid[0], y_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { // stronger condition - bit by bit equality /* if (calculated_res1[row] != calculated_res_mid[row]) { typename nvgraph_Const<T>::fpint_st comp1, comp2; comp1.f = calculated_res1[row]; comp2.f = calculated_res_mid[row]; ASSERT_EQ(comp1.u, comp2.u) << "Difference in result in row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } */ ASSERT_NEAR(calculated_res1[row], calculated_res_mid[row], nvgraph_Const<T>::tol) << "Difference in result in row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } } if (i == std::min(50, (int)(repeat/2))) { cudaMemGetInfo(&free_mid, &total); } if (i == repeat-1) { cudaMemGetInfo(&free_last, &total); } // reset vectors status = nvgraphSetVertexData(handle, g1, vertexptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[1], 1 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } ASSERT_LE(free_mid, free_last) << "Memory difference between iteration #" << std::min(50, (int)(repeat/2)) << " and last iteration is " << (double)(free_last-free_mid)/1e+6 << "MB"; status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_SrSPMV_Stress, StressDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_SrSPMV_Stress, StressFloat) { run_current_test<float>(GetParam()); } class NVGraphCAPITests_Widest_Stress : public ::testing::TestWithParam<WidestPath_Usecase> { public: 
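// Same stress pattern as the SrSPMV fixture above: run the algorithm many
// times on one graph, check that every iteration reproduces the iteration-0
// result within tolerance, and compare cudaMemGetInfo() free memory between a
// mid-run iteration and the last one to catch leaks in repeated C API calls.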
NVGraphCAPITests_Widest_Stress() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { //const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); //printf("We are in test %s of test case %s.\n", test_info->name(), test_info->test_case_name()); if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const WidestPath_Usecase& param) { nvgraphTopologyType_t topo = NVGRAPH_CSC_32; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); ASSERT_EQ(read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val), 0); fclose(fpin); const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data size_t numsets = 1; std::vector<T> calculated_res(n); //void* vertexptr[1] = {(void*)&calculated_res[0]}; cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, numsets, type_e ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int weight_index = 0; int source_vert = param.source_vert; int widest_path_index = 0; // run int repeat = std::max((int)(((float)(WIDEST_ITER_MULTIPLIER)*STRESS_MULTIPLIER)/(3*n)), 1); //printf ("Repeating C API call for %d times\n", repeat); std::vector<T> calculated_res1(n), calculated_res_mid(n); size_t free_mid = 0, free_last = 0, total = 0; for (int i = 0; i < repeat; i++) { //cudaMemGetInfo(&t, &total); //printf("Iteration: %d, freemem: %zu\n", i, t); status = nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // all of those should be equal if (i == 0) { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res1[0], widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } else { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res_mid[0], widest_path_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { // stronger condition - bit by bit equality /* if (calculated_res1[row] != calculated_res_mid[row]) { typename 
nvgraph_Const<T>::fpint_st comp1, comp2; comp1.f = calculated_res1[row]; comp2.f = calculated_res_mid[row]; ASSERT_EQ(comp1.u, comp2.u) << "Difference in result in row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } */ ASSERT_NEAR(calculated_res1[row], calculated_res_mid[row], nvgraph_Const<T>::tol) << "Difference in result in row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } } if (i == std::min(50, (int)(repeat/2))) { cudaMemGetInfo(&free_mid, &total); } if (i == repeat-1) { cudaMemGetInfo(&free_last, &total); } } ASSERT_LE(free_mid, free_last) << "Memory difference between iteration #" << std::min(50, (int)(repeat/2)) << " and last iteration is " << (double)(free_last-free_mid)/1e+6 << "MB"; status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_Widest_Stress, StressDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_Widest_Stress, StressFloat) { run_current_test<float>(GetParam()); } class NVGraphCAPITests_SSSP_Stress : public ::testing::TestWithParam<SSSP_Usecase> { public: NVGraphCAPITests_SSSP_Stress() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { //const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); //printf("We are in test %s of test case %s.\n", test_info->name(), test_info->test_case_name()); if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const SSSP_Usecase& param) { nvgraphTopologyType_t topo = NVGRAPH_CSC_32; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); ASSERT_EQ(read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val), 0); fclose(fpin); const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." 
<< test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data size_t numsets = 1; std::vector<T> calculated_res(n); //void* vertexptr[1] = {(void*)&calculated_res[0]}; cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, numsets, type_e ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int weight_index = 0; int source_vert = param.source_vert; int sssp_index = 0; // run int repeat = std::max((int)(((float)(SSSP_ITER_MULTIPLIER)*STRESS_MULTIPLIER)/(3*n)), 1); //printf ("Repeating C API call for %d times\n", repeat); std::vector<T> calculated_res1(n), calculated_res_mid(n), calculated_res_last(n); size_t free_mid = 0, free_last = 0, total = 0; for (int i = 0; i < repeat; i++) { // cudaMemGetInfo(&t, &total); // printf("Iteration: %d, freemem: %zu\n", i, t); status = nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // all of those should be equal if (i == 0) { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res1[0], sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } else { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res_mid[0], sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { // stronger condition - bit by bit equality /* if (calculated_res1[row] != calculated_res_mid[row]) { typename nvgraph_Const<T>::fpint_st comp1, comp2; comp1.f = calculated_res1[row]; comp2.f = calculated_res_mid[row]; ASSERT_EQ(comp1.u, comp2.u) << "Difference in result in row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } */ ASSERT_NEAR(calculated_res1[row], calculated_res_mid[row], nvgraph_Const<T>::tol) << "Difference in result in row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } } if (i == std::min(50, (int)(repeat/2))) { cudaMemGetInfo(&free_mid, &total); } if (i == repeat-1) { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res_last[0], sssp_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaMemGetInfo(&free_last, &total); } } ASSERT_LE(free_mid, free_last) << "Memory difference between iteration #" << std::min(50, (int)(repeat/2)) << " and last iteration is " << (double)(free_last-free_mid)/1e+6 << "MB"; status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_SSSP_Stress, StressDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_SSSP_Stress, StressFloat) { run_current_test<float>(GetParam()); } class NVGraphCAPITests_Pagerank_Stress : public ::testing::TestWithParam<Pagerank_Usecase> { public: NVGraphCAPITests_Pagerank_Stress() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { //const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); //printf("We are in 
test %s of test case %s.\n", test_info->name(), test_info->test_case_name()); if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const Pagerank_Usecase& param) { nvgraphTopologyType_t topo = NVGRAPH_CSC_32; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); std::vector<T> dangling(n); ASSERT_EQ(read_data_amgx_csr_bin_rhs (fpin, n, nnz, read_row_ptr, read_col_ind, read_val, dangling), 0); fclose(fpin); const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); if (!enough_device_memory<T>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data std::vector<T> calculated_res(n, (T)1.0/n); void* vertexptr[2] = {(void*)&dangling[0], (void*)&calculated_res[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateVertexData(handle, g1, 2, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, g1, vertexptr[1], 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, 1, type_e ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int bookmark_index = 0; int weight_index = 0; T alpha = param.alpha; int pagerank_index = 1; int has_guess = 1; float tolerance = {sizeof(T) > 4 ? 
1e-8f : 1e-6f}; int max_iter = 1000; // run int repeat = std::max((int)(((float)(PAGERANK_ITER_MULTIPLIER)*STRESS_MULTIPLIER)/n), 1); //printf ("Repeating C API call for %d times\n", repeat); std::vector<T> calculated_res1(n), calculated_res_mid(n); size_t free_mid = 0, free_last = 0, total = 0; for (int i = 0; i < repeat; i++) { //cudaMemGetInfo(&t, &total); //printf("Iteration: %d, freemem: %zu\n", i, t); status = nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // all of those should be equal if (i == 0) { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res1[0], pagerank_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } else { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res_mid[0], pagerank_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { // stronger condition - bit by bit equality /* if (calculated_res1[row] != calculated_res_mid[row]) { typename nvgraph_Const<T>::fpint_st comp1, comp2; comp1.f = calculated_res1[row]; comp2.f = calculated_res_mid[row]; ASSERT_EQ(comp1.u, comp2.u) << "Difference in result in row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } */ ASSERT_NEAR(calculated_res1[row], calculated_res_mid[row], nvgraph_Const<T>::tol) << "Difference in result in row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } } if (i == std::min(50, (int)(repeat/2))) { cudaMemGetInfo(&free_mid, &total); } if (i == repeat-1) { cudaMemGetInfo(&free_last, &total); } } ASSERT_LE(free_mid, free_last) << "Memory difference between iteration #" << std::min(50, (int)(repeat/2)) << " and last iteration is " << (double)(free_last-free_mid)/1e+6 << "MB"; status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_Pagerank_Stress, StressDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_Pagerank_Stress, StressFloat) { run_current_test<float>(GetParam()); } // instatiation of the performance/correctness checks INSTANTIATE_TEST_CASE_P(CorrectnessCheck1, NVGraphCAPITests_SrSPMV, ::testing::Values( // maybe check NVGRAPH_OR_AND_SR on some special bool matrices? 
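// Each SrSPMV_Usecase below is (graph file, semiring, scalar, scalar); the two
// trailing scalars feed param.alpha and param.beta in the tests (presumably in
// that order), so the 0/1 combinations exercise all alpha/beta scaling paths.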
SrSPMV_Usecase("graphs/small/small.bin", NVGRAPH_PLUS_TIMES_SR, 1, 1) , SrSPMV_Usecase("graphs/small/small.bin", NVGRAPH_MIN_PLUS_SR, 1, 1) , SrSPMV_Usecase("graphs/small/small.bin", NVGRAPH_MAX_MIN_SR, 1, 1) , SrSPMV_Usecase("graphs/small/small.bin", NVGRAPH_OR_AND_SR, 1, 1) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_PLUS_TIMES_SR, 0, 0) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_MIN_PLUS_SR, 0, 0) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_MAX_MIN_SR, 0, 0) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_OR_AND_SR, 0, 0) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_PLUS_TIMES_SR, 0, 1) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_MIN_PLUS_SR, 0, 1) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_MAX_MIN_SR, 0, 1) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_OR_AND_SR, 0, 1) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_PLUS_TIMES_SR, 1, 0) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_MIN_PLUS_SR, 1, 0) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_MAX_MIN_SR, 1, 0) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_OR_AND_SR, 1, 0) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_PLUS_TIMES_SR, 1, 1) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_MIN_PLUS_SR, 1, 1) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_MAX_MIN_SR, 1, 1) , SrSPMV_Usecase("graphs/dblp/dblp.bin", NVGRAPH_OR_AND_SR, 1, 1) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_PLUS_TIMES_SR, 0, 0) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_MIN_PLUS_SR, 0, 0) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_MAX_MIN_SR, 0, 0) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_OR_AND_SR, 0, 0) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_PLUS_TIMES_SR, 0, 1) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_MIN_PLUS_SR, 0, 1) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_MAX_MIN_SR, 0, 1) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_OR_AND_SR, 0, 1) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_PLUS_TIMES_SR, 1, 0) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_MIN_PLUS_SR, 1, 0) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_MAX_MIN_SR, 1, 0) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_OR_AND_SR, 1, 0) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_PLUS_TIMES_SR, 1, 1) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_MIN_PLUS_SR, 1, 1) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_MAX_MIN_SR, 1, 1) , SrSPMV_Usecase("graphs/Wikipedia/2003/wiki2003.bin", NVGRAPH_OR_AND_SR, 1, 1) ///// more instances ) ); INSTANTIATE_TEST_CASE_P(CorrectnessCheck2, NVGraphCAPITests_SrSPMV, ::testing::Values( SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_PLUS_TIMES_SR, 0, 0) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MIN_PLUS_SR, 0, 0) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MAX_MIN_SR, 0, 0) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_OR_AND_SR, 0, 0) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_PLUS_TIMES_SR, 0, 1) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MIN_PLUS_SR, 0, 1) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MAX_MIN_SR, 0, 1) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_OR_AND_SR, 0, 1) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MIN_PLUS_SR, 1, 0) , 
SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MAX_MIN_SR, 1, 0) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_OR_AND_SR, 1, 0) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MIN_PLUS_SR, 1, 1) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MAX_MIN_SR, 1, 1) , SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_OR_AND_SR, 1, 1) // these tests fails because of exceeding tolerance: diff = 0.00012826919555664062 vs tol = 9.9999997473787516e-05 //, SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_PLUS_TIMES_SR, 1, 1) //, SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_PLUS_TIMES_SR, 1, 0) ///// more instances ) ); INSTANTIATE_TEST_CASE_P(CorrectnessCheck, NVGraphCAPITests_WidestPath, // graph FILE source vert # file with expected result (in binary?) // // we read matrix stored in CSR and pass it as CSC - so matrix is in fact transposed, that's why we compare it to the results calculated on a transposed matrix ::testing::Values( WidestPath_Usecase("graphs/cage/cage13_T.mtx.bin", 0, "graphs/cage/cage13.widest_0.bin") , WidestPath_Usecase("graphs/cage/cage13_T.mtx.bin", 101, "graphs/cage/cage13.widest_101.bin") , WidestPath_Usecase("graphs/cage/cage14_T.mtx.bin", 0, "graphs/cage/cage14.widest_0.bin") , WidestPath_Usecase("graphs/cage/cage14_T.mtx.bin", 101, "graphs/cage/cage14.widest_101.bin") // file might be missing on eris //, WidestPath_Usecase("graphs/small/small_T.bin", 2, "graphs/small/small_T.widest_2.bin") , WidestPath_Usecase("graphs/dblp/dblp.bin", 100, "graphs/dblp/dblp_T.widest_100.bin") , WidestPath_Usecase("graphs/dblp/dblp.bin", 100000, "graphs/dblp/dblp_T.widest_100000.bin") , WidestPath_Usecase("graphs/Wikipedia/2003/wiki2003_T.bin", 100, "graphs/Wikipedia/2003/wiki2003_T.widest_100.bin") , WidestPath_Usecase("graphs/Wikipedia/2003/wiki2003_T.bin", 100000, "graphs/Wikipedia/2003/wiki2003_T.widest_100000.bin") , WidestPath_Usecase("graphs/citPatents/cit-Patents_T.mtx.bin", 6543, "") //, WidestPath_Usecase("dimacs10/kron_g500-logn20_T.mtx.bin", 100000, "") //, WidestPath_Usecase("dimacs10/hugetrace-00020_T.mtx.bin", 100000, "") //, WidestPath_Usecase("dimacs10/delaunay_n24_T.mtx.bin", 100000, "") //, WidestPath_Usecase("dimacs10/road_usa_T.mtx.bin", 100000, "") //, WidestPath_Usecase("dimacs10/hugebubbles-00020_T.mtx.bin", 100000, "") ///// more instances ) ); INSTANTIATE_TEST_CASE_P(CorrectnessCheck, NVGraphCAPITests_SSSP, // graph FILE source vert # file with expected result (in binary?) 
// // we read matrix stored in CSR and pass it as CSC - so matrix is in fact transposed, that's why we compare it to the results calculated on a transposed matrix ::testing::Values( SSSP_Usecase("graphs/cage/cage13_T.mtx.bin", 0, "graphs/cage/cage13.sssp_0.bin") , SSSP_Usecase("graphs/cage/cage13_T.mtx.bin", 101, "graphs/cage/cage13.sssp_101.bin") , SSSP_Usecase("graphs/cage/cage14_T.mtx.bin", 0, "graphs/cage/cage14.sssp_0.bin") , SSSP_Usecase("graphs/cage/cage14_T.mtx.bin", 101, "graphs/cage/cage14.sssp_101.bin") , SSSP_Usecase("graphs/small/small.bin", 2, "graphs/small/small.sssp_2.bin") , SSSP_Usecase("graphs/dblp/dblp.bin", 100, "graphs/dblp/dblp_T.sssp_100.bin") , SSSP_Usecase("graphs/dblp/dblp.bin", 100000, "graphs/dblp/dblp_T.sssp_100000.bin") , SSSP_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 100, "graphs/Wikipedia/2003/wiki2003_T.sssp_100.bin") , SSSP_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 100000, "graphs/Wikipedia/2003/wiki2003_T.sssp_100000.bin") , SSSP_Usecase("graphs/citPatents/cit-Patents_T.mtx.bin", 6543, "") //, SSSP_Usecase("dimacs10/kron_g500-logn20_T.mtx.bin", 100000, "") //, SSSP_Usecase("dimacs10/hugetrace-00020_T.mtx.bin", 100000, "") //, SSSP_Usecase("dimacs10/delaunay_n24_T.mtx.bin", 100000, "") //, SSSP_Usecase("dimacs10/road_usa_T.mtx.bin", 100000, "") //, SSSP_Usecase("dimacs10/hugebubbles-00020_T.mtx.bin", 100000, "") ///// more instances ) ); INSTANTIATE_TEST_CASE_P(CorrectnessCheck, NVGraphCAPITests_Pagerank, // graph FILE alpha file with expected result ::testing::Values( // Pagerank_Usecase("graphs/small/small_T.bin", 0.85, "graphs/small/small.pagerank_val_0.85.bin"), Pagerank_Usecase("graphs/webbase1M/webbase-1M_T.mtx.bin", 0.85, "graphs/webbase1M/webbase-1M.pagerank_val_0.85.bin"), Pagerank_Usecase("graphs/webBerkStan/web-BerkStan_T.mtx.bin", 0.85, "graphs/webBerkStan/web-BerkStan.pagerank_val_0.85.bin"), Pagerank_Usecase("graphs/webGoogle/web-Google_T.mtx.bin", 0.85, "graphs/webGoogle/web-Google.pagerank_val_0.85.bin"), Pagerank_Usecase("graphs/WikiTalk/wiki-Talk_T.mtx.bin", 0.85, "graphs/WikiTalk/wiki-Talk.pagerank_val_0.85.bin"), Pagerank_Usecase("graphs/citPatents/cit-Patents_T.mtx.bin", 0.85, "graphs/citPatents/cit-Patents.pagerank_val_0.85.bin"), Pagerank_Usecase("graphs/liveJournal/ljournal-2008_T.mtx.bin", 0.85, "graphs/liveJournal/ljournal-2008.pagerank_val_0.85.bin"), Pagerank_Usecase("dummy", 0.85, ""), Pagerank_Usecase("dimacs10/delaunay_n24_T.mtx.bin", 0.85, ""), Pagerank_Usecase("dummy", 0.85, ""), // waived until cublas change, see http://nvbugs/200189611, was: Pagerank_Usecase("dimacs10/hugebubbles-00020_T.mtx.bin", 0.85, ""), Pagerank_Usecase("dimacs10/hugetrace-00020_T.mtx.bin", 0.85, "", 10.0), Pagerank_Usecase("dimacs10/kron_g500-logn20_T.mtx.bin", 0.85, ""), Pagerank_Usecase("dimacs10/road_usa_T.mtx.bin", 0.85, "") //Pagerank_Usecase("dimacs10/channel-500x100x100-b050_T.mtx.bin", 0.85, ""), //Pagerank_Usecase("dimacs10/coPapersCiteseer_T.mtx.bin", 0.85, "") ///// more instances ) ); //INSTANTIATE_TEST_CASE_P(CorrectnessCheck, // NVGraphCAPITests_KrylovPagerank, // // graph FILE alpha file with expected result // ::testing::Values( // //Pagerank_Usecase("graphs/small/small_T.bin", 0.85, "graphs/small/small.pagerank_val_0.85.bin"), // Pagerank_Usecase("graphs/webbase1M/webbase-1M_T.mtx.bin", 0.85, "graphs/webbase1M/webbase-1M.pagerank_val_0.85.bin"), // Pagerank_Usecase("graphs/webBerkStan/web-BerkStan_T.mtx.bin", 0.85, "graphs/webBerkStan/web-BerkStan.pagerank_val_0.85.bin"), // 
Pagerank_Usecase("graphs/webGoogle/web-Google_T.mtx.bin", 0.85, "graphs/webGoogle/web-Google.pagerank_val_0.85.bin"), // Pagerank_Usecase("graphs/WikiTalk/wiki-Talk_T.mtx.bin", 0.85, "graphs/WikiTalk/wiki-Talk.pagerank_val_0.85.bin"), // Pagerank_Usecase("graphs/citPatents/cit-Patents_T.mtx.bin", 0.85, "graphs/citPatents/cit-Patents.pagerank_val_0.85.bin"), // Pagerank_Usecase("graphs/liveJournal/ljournal-2008_T.mtx.bin", 0.85, "graphs/liveJournal/ljournal-2008.pagerank_val_0.85.bin"), // Pagerank_Usecase("dummy", 0.85, ""), // Pagerank_Usecase("dimacs10/delaunay_n24_T.mtx.bin", 0.85, ""), // Pagerank_Usecase("dimacs10/hugebubbles-00020_T.mtx.bin", 0.85, ""), // Pagerank_Usecase("dimacs10/hugetrace-00020_T.mtx.bin", 0.85, "", 10.0), // Pagerank_Usecase("dimacs10/kron_g500-logn20_T.mtx.bin", 0.85, ""), // Pagerank_Usecase("dimacs10/road_usa_T.mtx.bin", 0.85, "") // //Pagerank_Usecase("dimacs10/channel-500x100x100-b050_T.mtx.bin", 0.85, ""), // //Pagerank_Usecase("dimacs10/coPapersCiteseer_T.mtx.bin", 0.85, "") // ///// more instances // ) // ); INSTANTIATE_TEST_CASE_P(StressTest, NVGraphCAPITests_SrSPMV_Stress, ::testing::Values( SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_PLUS_TIMES_SR, 1, 1), SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MIN_PLUS_SR, 1, 1), SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_MAX_MIN_SR, 1, 1), SrSPMV_Usecase("graphs/Wikipedia/2011/wiki2011.bin", NVGRAPH_OR_AND_SR, 1, 1) ) ); INSTANTIATE_TEST_CASE_P(StressTest, NVGraphCAPITests_Widest_Stress, ::testing::Values( WidestPath_Usecase("graphs/citPatents/cit-Patents_T.mtx.bin", 6543, "") ) ); INSTANTIATE_TEST_CASE_P(StressTest, NVGraphCAPITests_SSSP_Stress, ::testing::Values( SSSP_Usecase("graphs/citPatents/cit-Patents_T.mtx.bin", 6543, "") ) ); INSTANTIATE_TEST_CASE_P(StressTest, NVGraphCAPITests_Pagerank_Stress, ::testing::Values( Pagerank_Usecase("graphs/citPatents/cit-Patents_T.mtx.bin", 0.7, "") ) ); int main(int argc, char **argv) { for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--perf") == 0) PERF = 1; if (strcmp(argv[i], "--stress-iters") == 0) STRESS_MULTIPLIER = atoi(argv[i+1]); if (strcmp(argv[i], "--ref-data-dir") == 0) ref_data_prefix = std::string(argv[i+1]); if (strcmp(argv[i], "--graph-data-dir") == 0) graph_data_prefix = std::string(argv[i+1]); } srand(42); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/readMatrix.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <fstream> #include <sstream> //stringstream #include <string.h> #include <vector> #include <cstdlib> #include <iomanip> #include <algorithm> #include <cfloat> //Matrix Market COO reader-requires a call to sort in the test file template<typename IndexType_, typename ValueType_> struct Mat { IndexType_ i; IndexType_ j; ValueType_ val; bool transpose; Mat() { } //default cosntructor Mat(bool transpose) : transpose(transpose) { } //pass in when comapring rows or columns bool operator()(const Mat<IndexType_, ValueType_> &x1, const Mat<IndexType_, ValueType_> &x2) { if (!transpose) { if (x1.i == x2.i) return x1.j < x2.j; //if rows equal sort by column index return x1.i < x2.i; } else { if (x1.j == x2.j) return x1.i < x2.i; //if rows equal sort by column index return x1.j < x2.j; } } }; template<typename ValueType_> void dump_host_dense_mat(std::vector<ValueType_>& v, int ld) { std::stringstream ss; ss.str(std::string()); ss << std::setw(10); ss.precision(3); for (int i = 0; i < ld; ++i) { for (int j = 0; j < ld; ++j) { ss << v[i * ld + j] << std::setw(10); } ss << std::endl; } std::cout << ss.str(); } /** * Reads in graphs given in the "network" format. This format consists a * row for each edge in the graph, giving its source and destination. There * is no header or comment lines. * @param filename The name of the file to read in. * @param nnz The number of edges given in the file. * @param src Vector to write out the sources to. * @param dest Vector to write out the destinations to. 
*/ template<typename IndexType> void readNetworkFile(const char * filename, size_t nnz, std::vector<IndexType>& src, std::vector<IndexType>& dest) { std::ifstream infile; infile.open(filename); src.resize(nnz); dest.resize(nnz); for (size_t i = 0; i < nnz; i++) { infile >> src[i]; infile >> dest[i]; } infile.close(); std::cout << "Read in " << nnz << " rows from: " << filename << "\n"; } //reads the Matrix Market format from the florida collection of sparse matrices assuming //the first lines are comments beginning with % template<typename IndexType_, typename ValueType_> void readMatrixMarketFile(const char * filename, IndexType_ &m, IndexType_ &n, IndexType_ &nnz, std::vector<Mat<IndexType_, ValueType_> > &matrix, bool edges_only) { std::ifstream infile; infile.open(filename); std::string line; std::stringstream params; while (1) { std::getline(infile, line); //ignore initial comments that begin with % if (line[0] != '%') { //first line without % for comments will have matrix size params << line; params >> n; params >> m; params >> nnz; break; //break and then read in COO format } } //COO format matrix.resize(nnz); //remaining file lines are tuples of row ind, col ind and possibly value //sometimes value assumed to be one for (int k = 0; k < nnz; ++k) { infile >> matrix[k].i; infile >> matrix[k].j; if (edges_only) matrix[k].val = 1.0; else infile >> matrix[k].val; } infile.close(); } //binary matrix reader functions void printUsageAndExit() { printf("%s", "Usage:./csrmv_pl matrix_csr.bin\n"); printf("%s", "M is square, in Amgx binary format\n"); exit(0); } int read_header_amgx_csr_bin(FILE* fpin, int & n, int & nz ) { char text_header[255]; unsigned int system_flags[9]; size_t is_read1, is_read2; is_read1 = fread(text_header, sizeof(char), strlen("%%NVAMGBinary\n"), fpin); is_read2 = fread(system_flags, sizeof(unsigned int), 9, fpin); if (!is_read1 || !is_read2) { printf("%s", "I/O fail\n"); return 1; } // We assume that system_flags [] = { 1, 1, whatever, 0, 0, 1, 1, n, nz }; /* bool is_mtx = system_flags[0]; bool is_rhs = system_flags[1]; bool is_soln = system_flags[2]; unsigned idx_t matrix_format = system_flags[3]; bool diag = system_flags[4]; unsigned idx_t block_dimx = system_flags[5]; unsigned idx_t block_dimy = system_flags[6]; */ if (system_flags[0] != 1 || system_flags[1] != 1 || system_flags[3] != 0 || system_flags[4] != 0 || system_flags[5] != 1 || system_flags[6] != 1 || system_flags[7] < 1 || system_flags[8] < 1) { printf( "Wrong format : system_flags [] != { 1(%d), 1(%d), 0(%d), 0(%d), 0(%d), 1(%d), 1(%d), n(%d), nz(%d) }\n\n", system_flags[0], system_flags[1], system_flags[2], system_flags[3], system_flags[4], system_flags[5], system_flags[6], system_flags[7], system_flags[8]); return 1; } n = system_flags[7]; nz = system_flags[8]; return 0; } //reader is for ints and double template<typename I> int read_csr_bin(FILE* fpin, I &n, I &nz, std::vector<I> &row_ptr, std::vector<I> &col_ind ) { size_t is_read1, is_read2, is_read3, is_read4; is_read1 = fread(&n, sizeof(I), 1, fpin); is_read2 = fread(&nz, sizeof(I), 1, fpin); if (!is_read1 || !is_read2) { printf("%s", "I/O fail\n"); return 1; } row_ptr.resize(n + 1); col_ind.resize(nz); is_read3 = fread(&row_ptr[0], sizeof(I), n + 1, fpin); is_read4 = fread(&col_ind[0], sizeof(I), nz, fpin); if (!is_read3 || !is_read4) { printf("%s", "I/O fail\n"); return 1; } return 0; } //reader is for ints and double int read_data_amgx_csr_bin(FILE* fpin, int n, int nz, std::vector<int> & row_ptr, std::vector<int> & col_ind, 
std::vector<double>& val ) { size_t is_read1, is_read2, is_read3; is_read1 = fread(&row_ptr[0], sizeof(std::vector<int>::value_type), n + 1, fpin); is_read2 = fread(&col_ind[0], sizeof(std::vector<int>::value_type), nz, fpin); is_read3 = fread(&val[0], sizeof(std::vector<double>::value_type), nz, fpin); if (!is_read1 || !is_read2 || !is_read3) { printf("%s", "I/O fail\n"); return 1; } return 0; } int read_data_amgx_csr_bin_rhs(FILE* fpin, int n, int nz, std::vector<int> & row_ptr, std::vector<int> & col_ind, std::vector<double>& val, std::vector<double>& rhs ) { size_t is_read1, is_read2, is_read3, is_read4; is_read1 = fread(&row_ptr[0], sizeof(std::vector<int>::value_type), n + 1, fpin); is_read2 = fread(&col_ind[0], sizeof(std::vector<int>::value_type), nz, fpin); is_read3 = fread(&val[0], sizeof(std::vector<double>::value_type), nz, fpin); is_read4 = fread(&rhs[0], sizeof(std::vector<double>::value_type), n, fpin); if (!is_read1 || !is_read2 || !is_read3 || !is_read4) { printf("%s", "I/O fail\n"); return 1; } return 0; } //reader is for ints and double int read_data_amgx_csr_bin(FILE* fpin, int n, int nz, std::vector<int> & row_ptr, std::vector<int> & col_ind, std::vector<float>& val ) { size_t is_read1, is_read2, is_read3; is_read1 = fread(&row_ptr[0], sizeof(std::vector<int>::value_type), n + 1, fpin); is_read2 = fread(&col_ind[0], sizeof(std::vector<int>::value_type), nz, fpin); double* t_storage = new double[std::max(n, nz)]; is_read3 = fread(t_storage, sizeof(double), nz, fpin); for (int i = 0; i < nz; i++) { val[i] = static_cast<float>(t_storage[i]); } delete[] t_storage; if (!is_read1 || !is_read2 || !is_read3) { printf("%s", "I/O fail\n"); return 1; } return 0; } int read_data_amgx_csr_bin_rhs(FILE* fpin, int n, int nz, std::vector<int> & row_ptr, std::vector<int> & col_ind, std::vector<float>& val, std::vector<float>& rhs ) { size_t is_read1, is_read2, is_read3, is_read4; is_read1 = fread(&row_ptr[0], sizeof(std::vector<int>::value_type), n + 1, fpin); is_read2 = fread(&col_ind[0], sizeof(std::vector<int>::value_type), nz, fpin); double* t_storage = new double[std::max(n, nz)]; is_read3 = fread(t_storage, sizeof(double), nz, fpin); for (int i = 0; i < nz; i++) { val[i] = static_cast<float>(t_storage[i]); } is_read4 = fread(t_storage, sizeof(double), n, fpin); for (int i = 0; i < n; i++) { rhs[i] = static_cast<float>(t_storage[i]); } delete[] t_storage; if (!is_read1 || !is_read2 || !is_read3 || !is_read4) { printf("%s", "I/O fail\n"); return 1; } return 0; } //read binary vector from file int read_binary_vector(FILE* fpin, int n, std::vector<float>& val ) { size_t is_read1; double* t_storage = new double[n]; is_read1 = fread(t_storage, sizeof(double), n, fpin); for (int i = 0; i < n; i++) { if (t_storage[i] == DBL_MAX) val[i] = FLT_MAX; else if (t_storage[i] == -DBL_MAX) val[i] = -FLT_MAX; else val[i] = static_cast<float>(t_storage[i]); } delete[] t_storage; if (is_read1 != (size_t) n) { printf("%s", "I/O fail\n"); return 1; } return 0; } int read_binary_vector(FILE* fpin, int n, std::vector<double>& val ) { size_t is_read1; is_read1 = fread(&val[0], sizeof(double), n, fpin); if (is_read1 != (size_t) n) { printf("%s", "I/O fail\n"); return 1; } return 0; } int read_binary_vector(FILE* fpin, int n, std::vector<int>& val ) { size_t is_read1; is_read1 = fread(&val[0], sizeof(int), n, fpin); if (is_read1 != (size_t) n) { printf("%s", "I/O fail\n"); return 1; } return 0; } //read in as one based template<typename IndexType_, typename ValueType_> void init_MatrixMarket(IndexType_ 
base, const char *filename, bool edges_only, //assumes value is 1 bool transpose, //parameter to run on A or A' IndexType_ &n, IndexType_ &m, IndexType_ &nnz, std::vector<ValueType_> &csrVal, std::vector<IndexType_> &csrColInd, std::vector<IndexType_> &csrRowInd) { FILE *inputFile = fopen(filename, "r"); if (inputFile == NULL) { std::cerr << "ERROR: File path not valid!" << std::endl; exit(EXIT_FAILURE); } std::vector<Mat<IndexType_, ValueType_> > matrix; readMatrixMarketFile<IndexType_, ValueType_>(filename, m, n, nnz, matrix, edges_only); Mat<IndexType_, ValueType_> compare(transpose); std::sort(matrix.begin(), matrix.end(), compare); csrVal.resize(nnz); csrColInd.resize(nnz); csrRowInd.resize(nnz); for (int k = 0; k < nnz; ++k) { csrVal[k] = matrix[k].val; csrColInd[k] = (transpose) ? matrix[k].i : matrix[k].j; //doing the transpose csrRowInd[k] = (transpose) ? matrix[k].j : matrix[k].i; } if (base == 0) //always give base 0 { for (int i = 0; i < nnz; ++i) { csrColInd[i] -= 1; //get zero based csrRowInd[i] -= 1; } } fclose(inputFile); } /*template<typename val_t> bool almost_equal (std::vector<val_t> & a, std::vector<val_t> & b, val_t epsilon) { if (a.size() != b.size()) return false; bool passed = true; std::vector<val_t>::iterator itb=b.begin(); for (std::vector<val_t>::iterator ita = a.begin() ; ita != a.end(); ++ita) { if (fabs(*ita - *itb) > epsilon) { printf("At ( %ld ) : x1=%lf | x2=%lf\n",ita-a.begin(), *ita,*itb); passed = false; } ++itb; } return passed; }*/
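// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original header): the binary readers above
// are meant to be chained -- read_header_amgx_csr_bin() recovers n and nz from
// the "%%NVAMGBinary" header, after which read_data_amgx_csr_bin() fills the
// CSR arrays (ValueType_ must be float or double to match the overloads
// defined above). A minimal loader built only from functions in this file:
template<typename ValueType_>
int load_amgx_csr(const char* filename,
                  int& n, int& nz,
                  std::vector<int>& row_ptr,
                  std::vector<int>& col_ind,
                  std::vector<ValueType_>& val) {
    FILE* fpin = fopen(filename, "rb");
    if (fpin == NULL) return 1;                       // file missing or unreadable
    if (read_header_amgx_csr_bin(fpin, n, nz)) {      // reads n and nz from the header
        fclose(fpin);
        return 1;
    }
    row_ptr.resize(n + 1);
    col_ind.resize(nz);
    val.resize(nz);
    int rc = read_data_amgx_csr_bin(fpin, n, nz, row_ptr, col_ind, val);
    fclose(fpin);
    return rc;                                        // 0 on success, 1 on I/O failure
}
// ---------------------------------------------------------------------------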
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_2d_bfs.cpp
// This is gtest application that contains all of the C API tests. Parameters: // nvgraph_capi_tests [--perf] [--stress-iters N] [--gtest_filter=NameFilterPatter] // It also accepts any other gtest (1.7.0) default parameters. // Right now this application contains: // 1) Sanity Check tests - tests on simple examples with known answer (or known behaviour) // 2) Correctness checks tests - tests on real graph data, uses reference algorithm // (CPU code for SrSPMV and python scripts for other algorithms, see // python scripts here: //sw/gpgpu/nvgraph/test/ref/) with reference results, compares those two. // It also measures performance of single algorithm C API call, enf enabled (see below) // 3) Corner cases tests - tests with some bad inputs, bad parameters, expects library to handle // it gracefully // 4) Stress tests - makes sure that library result is persistent throughout the library usage // (a lot of C API calls). Also makes some assumptions and checks on memory usage during // this test. // // We can control what tests to launch by using gtest filters. For example: // Only sanity tests: // ./nvgraph_capi_tests_traversal --gtest_filter=*Sanity* // And, correspondingly: // ./nvgraph_capi_tests_traversal --gtest_filter=*Correctness* // ./nvgraph_capi_tests_traversal --gtest_filter=*Corner* // ./nvgraph_capi_tests_traversal --gtest_filter=*Stress* // Or, combination: // ./nvgraph_capi_tests_traversal --gtest_filter=*Sanity*:*Correctness* // // Performance reports are provided in the ERIS format and disabled by default. // Could be enabled by adding '--perf' to the command line. I added this parameter to vlct // // Parameter '--stress-iters N', which gives multiplier (not an absolute value) for the number of launches for stress tests // #include <utility> #include "gtest/gtest.h" #include "nvgraph_test_common.h" #include "valued_csr_graph.hxx" #include "readMatrix.hxx" #include "nvgraphP.h" #include "nvgraph.h" #include <nvgraph_experimental.h> // experimental header, contains hidden API entries, can be shared only under special circumstances without reveling internal things #include "stdlib.h" #include <algorithm> #include <numeric> #include <queue> #include <cstdint> #include <math.h> // do the perf measurements, enabled by command line parameter '--perf' static int PERF = 0; // minimum vertices in the graph to perform perf measurements #define PERF_ROWS_LIMIT 10000 // number of repeats = multiplier/num_vertices #define Traversal_ITER_MULTIPLIER 30000000 template<typename T> struct nvgraph_Const; template<> struct nvgraph_Const<int> { static const cudaDataType_t Type = CUDA_R_32I; static const int inf; }; const int nvgraph_Const<int>::inf = INT_MAX; static std::string ref_data_prefix = ""; static std::string graph_data_prefix = ""; // iterations for stress tests = this multiplier * iterations for perf tests static int STRESS_MULTIPLIER = 10; void offsetsToIndices(std::vector<int>& offsets, std::vector<int>& indices) { int nnz = offsets.back(); indices.resize(nnz); int n = offsets.size() - 1; for (int row = 0; row < n; row++) { for (int pos = offsets[row]; pos < offsets[row + 1]; pos++) indices[pos] = row; } } bool enough_device_memory(int n, int nnz, size_t add) { size_t mtotal, mfree; cudaMemGetInfo(&mfree, &mtotal); if (mfree > add + sizeof(int) * (4 * n)) //graph + pred + distances + 2n (working data) return true; return false; } std::string convert_to_local_path(const std::string& in_file) { std::string wstr = in_file; if ((wstr != "dummy") & (wstr != "")) { std::string prefix; if 
(graph_data_prefix.length() > 0) { prefix = graph_data_prefix; } else { #ifdef _WIN32 //prefix = "C:\\mnt\\eris\\test\\matrices_collection\\"; prefix = "Z:\\matrices_collection\\"; std::replace(wstr.begin(), wstr.end(), '/', '\\'); #else prefix = "/mnt/nvgraph_test_data/"; #endif } wstr = prefix + wstr; } return wstr; } void ref_bfs(int n, int nnz, int *rowPtr, int *colInd, int *mask, int source_vertex, int *distances) { for (int i = 0; i != n; ++i) distances[i] = -1; std::queue<int> q; q.push(source_vertex); distances[source_vertex] = 0; while (!q.empty()) { int u = q.front(); q.pop(); for (int iCol = rowPtr[u]; iCol != rowPtr[u + 1]; ++iCol) { if (mask && !mask[iCol]) continue; int v = colInd[iCol]; if (distances[v] == -1) { //undiscovered distances[v] = distances[u] + 1; q.push(v); } } } } typedef struct Traversal_Usecase_t { std::string graph_file; int source_vert; bool useMask; bool undirected; Traversal_Usecase_t(const std::string& a, int b, bool _useMask = false, bool _undirected = false) : source_vert(b), useMask(_useMask), undirected(_undirected) { graph_file = convert_to_local_path(a); } ; Traversal_Usecase_t& operator=(const Traversal_Usecase_t& rhs) { graph_file = rhs.graph_file; source_vert = rhs.source_vert; useMask = rhs.useMask; return *this; } } Traversal_Usecase; //// Traversal tests class NVGraphCAPITests_2d_bfs: public ::testing::TestWithParam<Traversal_Usecase> { public: NVGraphCAPITests_2d_bfs() : handle(NULL) { } static void SetupTestCase() { } static void TearDownTestCase() { } virtual void SetUp() { if (handle == NULL) { char* nvgraph_gpus = getenv("NVGRAPH_GPUS"); if (nvgraph_gpus) printf("Value of NVGRAPH_GPUS=%s\n", nvgraph_gpus); else printf("Value of NVGRAPH_GPUS is null\n"); std::vector<int32_t> gpus; int32_t dummy; std::stringstream ss(nvgraph_gpus); while (ss >> dummy) { gpus.push_back(dummy); if (ss.peek() == ',') ss.ignore(); } printf("There were %d devices found: ", (int) gpus.size()); for (int i = 0; i < gpus.size(); i++) std::cout << gpus[i] << " "; std::cout << "\n"; devices = (int32_t*) malloc(sizeof(int32_t) * gpus.size()); for (int i = 0; i < gpus.size(); i++) devices[i] = gpus[i]; numDevices = gpus.size(); status = nvgraphCreateMulti(&handle, numDevices, devices); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; if (devices) free(devices); } } nvgraphStatus_t status; nvgraphHandle_t handle; int32_t *devices; int32_t numDevices; template<typename EdgeT> void run_current_test(const Traversal_Usecase& param) { const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.source_vert; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + std::string("_") + ss.str().c_str(); nvgraphTopologyType_t topo = NVGRAPH_2D_32I_32I; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(), "rb"); ASSERT_TRUE(fpin != NULL)<< "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a network in amgx binary format ASSERT_EQ(read_header_amgx_csr_bin(fpin, n, nnz), 0); std::vector<int> read_row_ptr(n + 1), read_col_ind(nnz); std::vector<EdgeT> csr_read_val(nnz); std::cout << getFileName(param.graph_file) << " Vertices: " << n << " Edges: " << nnz << "\n"; ASSERT_EQ(read_data_amgx_csr_bin(fpin, n, nnz, 
read_row_ptr, read_col_ind, csr_read_val), 0); fclose(fpin); std::vector<int> row_ind; offsetsToIndices(read_row_ptr, row_ind); std::vector<int> csr_mask(nnz, 1); if (param.useMask) { //Generating a mask //Should be improved for (int i = 0; i < nnz; i += 2) csr_mask[i] = 0; } if (!enough_device_memory(n, nnz, sizeof(int) * (read_row_ptr.size() + read_col_ind.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph int32_t blockN = std::max(2,(int)ceil(sqrt(numDevices))); nvgraph2dCOOTopology32I_st topology = { n, nnz, &row_ind[0], &read_col_ind[0], CUDA_R_32I, NULL, numDevices, devices, blockN, NVGRAPH_DEFAULT }; status = nvgraphSetGraphStructure(handle, g1, (void*) &topology, topo); // set up graph data std::vector<int> calculated_distances_res(n); std::vector<int> calculated_predecessors_res(n); // if (param.useMask) { // status = nvgraphAllocateEdgeData(handle, g1, numsets_e, type_e); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // } int source_vert = param.source_vert; if (param.useMask) { //if we need to use a mask //Copying mask into graph //status = nvgraphSetEdgeData(handle, g1, &csr_mask[0], 0); //ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //nvgraphTraversalSetEdgeMaskIndex(&traversal_param, 0); } status = nvgraph2dBfs(handle, g1, source_vert, &calculated_distances_res[0], &calculated_predecessors_res[0]); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDeviceSynchronize(); if (PERF && n > PERF_ROWS_LIMIT) { double start, stop; start = second(); int repeat = 30; for (int i = 0; i < repeat; i++) { status = nvgraph2dBfs(handle, g1, source_vert, &calculated_distances_res[0], &calculated_predecessors_res[0]); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } cudaDeviceSynchronize(); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0 * (stop - start) / repeat); } ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // check with reference std::vector<int> expected_distances_res(n); ref_bfs(n, nnz, &read_row_ptr[0], &read_col_ind[0], &csr_mask[0], source_vert, &expected_distances_res[0]); //Checking distances // int wrong = 0; // for (int i = 0; i < n; i++) { // if (expected_distances_res[i] != calculated_distances_res[i]) { // wrong++; // std::cout << "Error at " << i << " expected " << expected_distances_res[i] << " actual " // << calculated_distances_res[i] << "\n"; // } // } // std::cout << wrong << "/" << n << " distances are incorrect.\n"; for (int i = 0; i < n; ++i) { ASSERT_EQ(expected_distances_res[i], calculated_distances_res[i])<< "Wrong distance from source in row #" << i << " graph " << param.graph_file << " source_vert=" << source_vert<< "\n"; } //Checking predecessors for (int i = 0; i < n; ++i) { if (calculated_predecessors_res[i] != -1) { ASSERT_EQ(expected_distances_res[i], expected_distances_res[calculated_predecessors_res[i]] + 1)<< "Wrong predecessor in row #" << i << " graph " << param.graph_file << " source_vert=" << source_vert<< "\n"; } else { ASSERT_TRUE(expected_distances_res[i] == 0 || expected_distances_res[i] == -1) << "Wrong predecessor in row #" << i << " graph " << param.graph_file << " source_vert=" << source_vert<< "\n"; } } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_2d_bfs, CheckResult) { run_current_test<float>(GetParam()); } /// Few sanity checks. 
class NVGraphCAPITests_2d_bfs_Sanity: public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; int32_t* devices; int32_t numDevices; NVGraphCAPITests_2d_bfs_Sanity() : handle(NULL) { } static void SetupTestCase() { } static void TearDownTestCase() { } virtual void SetUp() { topo = NVGRAPH_2D_32I_32I; nvgraphStatus_t status; if (handle == NULL) { char* nvgraph_gpus = getenv("NVGRAPH_GPUS"); if (nvgraph_gpus) printf("Value of NVGRAPH_GPUS=%s\n", nvgraph_gpus); else printf("Value of NVGRAPH_GPUS is null\n"); std::vector<int32_t> gpus; int32_t dummy; std::stringstream ss(nvgraph_gpus); while (ss >> dummy) { gpus.push_back(dummy); if (ss.peek() == ',') ss.ignore(); } printf("There were %d devices found: ", (int) gpus.size()); for (int i = 0; i < gpus.size(); i++) std::cout << gpus[i] << " "; std::cout << "\n"; devices = (int32_t*) malloc(sizeof(int32_t) * gpus.size()); for (int i = 0; i < gpus.size(); i++) devices[i] = gpus[i]; numDevices = gpus.size(); status = nvgraphCreateMulti(&handle, numDevices, devices); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template<typename EdgeT> void prepare_and_run(nvgraph2dCOOTopology32I_st& topo_st, int* expected) { g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph n = topo_st.nvertices; nnz = topo_st.nedges; status = nvgraphSetGraphStructure(handle, g1, (void*) &topo_st, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int source_vert = 0; // Call BFS std::vector<int> calculated_dist(n); std::vector<int> calculated_pred(n); status = nvgraph2dBfs(handle, g1, source_vert, &calculated_dist[0], &calculated_pred[0]); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // Check results against reference implementation for (int row = 0; row < n; row++) { int reference_res = (int) expected[row]; int nvgraph_res = (int) calculated_dist[row]; ASSERT_EQ(reference_res, nvgraph_res); } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // cycle graph, shortest path = vertex number template<typename EdgeT> void run_cycle_test() { n = 1024; nnz = n; std::vector<int> offsets(n + 1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i] = i; neighborhood[i] = (i + 1) % n; } offsets[n] = n; std::vector<int> expected_res(n, nvgraph_Const<int>::inf); for (int i = 0; i < n; i++) { expected_res[i] = i; } int32_t blockN = std::max(2,(int)ceil(sqrt(numDevices))); nvgraph2dCOOTopology32I_st topology = { n, nnz, &offsets[0], &neighborhood[0], CUDA_R_32I, NULL, numDevices, devices, blockN, NVGRAPH_DEFAULT }; prepare_and_run<EdgeT>(topology, &expected_res[0]); free(devices); } template<typename EdgeT> void run_cycle_test_undirected() { n = 16; nnz = n * 2; std::vector<int> offsets(n + 1), neighborhood(nnz); for (int i = 0; i < n; i++) { offsets[i] = i * 2; neighborhood[i * 2] = (i - 1 + n) % n; neighborhood[i * 2 + 1] = (i + 1 + n) % n; } offsets[n] = nnz; std::vector<int> expected_res(n, nvgraph_Const<int>::inf); for (int i = 0; i < n; i++) { expected_res[i] = i; } int32_t blockN = std::max(2,(int)ceil(sqrt(numDevices))); nvgraph2dCOOTopology32I_st topology = { n, nnz, &offsets[0], &neighborhood[0], CUDA_R_32I, NULL, numDevices, devices, blockN, NVGRAPH_DEFAULT }; prepare_and_run<EdgeT>(topology, &expected_res[0]); free(devices); } 
template<typename EdgeT> void run_block_skip_test() { n = 10; nnz = 4; int rowIndices[4] = { 0, 1, 5, 6 }; int columnIndices[4] = { 1, 5, 6, 3 }; int expected[10] = { 0, 1, -1, 4, -1, 2, 3, -1, -1, -1 }; int32_t blockN = std::max(2,(int)ceil(sqrt(numDevices))); nvgraph2dCOOTopology32I_st topology = { n, nnz, rowIndices, columnIndices, CUDA_R_32I, NULL, numDevices, devices, blockN, NVGRAPH_DEFAULT }; prepare_and_run<EdgeT>(topology, expected); free(devices); } template<typename EdgeT> void run_multi_path_test() { n = 10; nnz = 6; int rowIndices[6] = { 0, 0, 1, 5, 5, 6 }; int columnIndices[6] = { 1, 5, 6, 6, 9, 9 }; int expected[10] = { 0, 1, -1, -1, -1, 1, 2, -1, -1, 2 }; int32_t blockN = std::max(2,(int)ceil(sqrt(numDevices))); nvgraph2dCOOTopology32I_st topology = { n, nnz, rowIndices, columnIndices, CUDA_R_32I, NULL, numDevices, devices, blockN, NVGRAPH_DEFAULT }; prepare_and_run<EdgeT>(topology, expected); free(devices); } }; TEST_F(NVGraphCAPITests_2d_bfs_Sanity, SanityCycle) { run_cycle_test<float>(); } TEST_F(NVGraphCAPITests_2d_bfs_Sanity, BlockSkip) { run_block_skip_test<float>(); } TEST_F(NVGraphCAPITests_2d_bfs_Sanity, MultiPath) { run_multi_path_test<float>(); } // class NVGraphCAPITests_Traversal_Stress: public ::testing::TestWithParam<Traversal_Usecase> { // public: // NVGraphCAPITests_Traversal_Stress() : // handle(NULL) { // } // // static void SetupTestCase() { // } // static void TearDownTestCase() { // } // virtual void SetUp() { // //const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); // //printf("We are in test %s of test case %s.\n", test_info->name(), test_info->test_case_name()); // if (handle == NULL) { // int* devices = (int*)malloc(sizeof(int) * 2); // devices[0] = 0; // devices[1] = 1; // status = nvgraphCreateMulti(&handle, 2, devices); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // free(devices); // } // } // virtual void TearDown() { // if (handle != NULL) { // status = nvgraphDestroy(handle); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // handle = NULL; // } // } // nvgraphStatus_t status; // nvgraphHandle_t handle; // // template<typename EdgeT> // void run_current_test(const Traversal_Usecase& param) // { // nvgraphTopologyType_t topo = NVGRAPH_2D_32I_32I; // // nvgraphStatus_t status; // // FILE* fpin = fopen(param.graph_file.c_str(), "rb"); // ASSERT_TRUE(fpin != NULL)<< "Cannot read input graph file: " << param.graph_file << std::endl; // int n, nnz; // //Read a network in amgx binary format and the bookmark of dangling nodes // ASSERT_EQ(read_header_amgx_csr_bin(fpin, n, nnz), 0); // std::vector<int> read_row_ptr(n + 1), read_col_ind(nnz); // std::vector<EdgeT> read_val(nnz); // ASSERT_EQ(read_data_amgx_csr_bin(fpin, n, nnz, read_row_ptr, read_col_ind, read_val), 0); // fclose(fpin); // // nvgraphGraphDescr_t g1 = NULL; // status = nvgraphCreateGraphDescr(handle, &g1); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // // // set up graph // int* devices = (int*)malloc(sizeof(int) * 2); // devices[0] = 0; // devices[1] = 1; // nvgraph2dCOOTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0], CUDA_R_32I, NULL, 2, devices, 2, NVGRAPH_DEFAULT}; // status = nvgraphSetGraphStructure(handle, g1, (void*) &topology, topo); // free(devices); // // std::vector<int> calculated_res(n); // int source_vert = param.source_vert; // // // run // int repeat = 2;//std::max((int)(((float)(Traversal_ITER_MULTIPLIER)*STRESS_MULTIPLIER)/(3*n)), 1); // // std::vector<int> calculated_res1(n), 
calculated_res_mid1(n), calculated_res_last(n); // std::vector<int> calculated_res2(n), calculated_res_mid2(n); // size_t free_mid = 0, free_last = 0, total = 0; // for (int i = 0; i < repeat; i++) { // status = nvgraphTraversal( handle, // g1, // NVGRAPH_TRAVERSAL_BFS, // &source_vert, // traversal_param); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // // // all of those should be equal // if (i == 0) // { // status = nvgraphGetVertexData(handle, // g1, // (void *) &calculated_res1[0], // traversal_distances_index); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // status = nvgraphGetVertexData(handle, // g1, // (void *) &calculated_res2[0], // traversal_predecessors_index); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // } // else // { // status = nvgraphGetVertexData(handle, // g1, // (void *) &calculated_res_mid1[0], // traversal_distances_index); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // status = nvgraphGetVertexData(handle, // g1, // (void *) &calculated_res_mid2[0], // traversal_predecessors_index); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // // for (int row = 0; row < n; row++) // { // ASSERT_EQ(calculated_res1[row], calculated_res_mid1[row])<< "Difference in result in distances for row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; // // predecessors could be different since multiple shortest paths are possible // //ASSERT_EQ(calculated_res2[row], calculated_res_mid2[row]) << "Difference in result in predecessors for row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; // } // } // // if (i == std::min(50, (int) (repeat / 2))) // { // cudaMemGetInfo(&free_mid, &total); // } // if (i == repeat - 1) // { // status = nvgraphGetVertexData(handle, // g1, // (void *) &calculated_res_last[0], // traversal_distances_index); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // cudaMemGetInfo(&free_last, &total); // } // } // // ASSERT_LE(free_mid, free_last)<< "Memory difference between iteration #" << std::min(50, (int)(repeat/2)) << " and last iteration is " << (double)(free_last-free_mid)/1e+6 << "MB"; // // status = nvgraphDestroyGraphDescr(handle, g1); // ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // } // }; // // TEST_P(NVGraphCAPITests_Traversal_Stress, Stress) // { // run_current_test<float>(GetParam()); // } // instatiation of the performance/correctness checks INSTANTIATE_TEST_CASE_P(CorrectnessCheck, NVGraphCAPITests_2d_bfs, // graph FILE source vert # file with expected result (in binary?) 
::testing::Values( Traversal_Usecase("graphs/cage/cage13_T.mtx.bin", 0) , Traversal_Usecase("graphs/cage/cage13_T.mtx.bin", 10) , Traversal_Usecase("graphs/cage/cage14_T.mtx.bin", 0) , Traversal_Usecase("graphs/cage/cage14_T.mtx.bin", 10) , Traversal_Usecase("graphs/small/small.bin", 0) , Traversal_Usecase("graphs/small/small.bin", 0) , Traversal_Usecase("graphs/small/small.bin", 3) , Traversal_Usecase("graphs/dblp/dblp.bin", 0, false, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 100, false, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 1000, false, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 100000, false, true) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 0) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 100) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 10000) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 100000) , Traversal_Usecase("graphs/Wikipedia/2011/wiki2011.bin", 1) , Traversal_Usecase("graphs/Wikipedia/2011/wiki2011.bin", 1000) , Traversal_Usecase("dimacs10/road_usa_T.mtx.bin", 100) , Traversal_Usecase("graphs/Twitter/twitter.bin", 0) , Traversal_Usecase("graphs/Twitter/twitter.bin", 100) , Traversal_Usecase("graphs/Twitter/twitter.bin", 10000) , Traversal_Usecase("graphs/Twitter/twitter.bin", 3000000) , Traversal_Usecase("dimacs10/hugebubbles-00020_T.mtx.bin", 100000) // /// instances using mask // , Traversal_Usecase("graphs/small/small.bin", 0, true) // , Traversal_Usecase("graphs/small/small.bin", 0, true) // , Traversal_Usecase("graphs/small/small.bin", 3, true) // , Traversal_Usecase("graphs/dblp/dblp.bin", 0, true) // , Traversal_Usecase("graphs/dblp/dblp.bin", 100, true) // , Traversal_Usecase("graphs/dblp/dblp.bin", 1000, true) // , Traversal_Usecase("graphs/dblp/dblp.bin", 100000, true) // , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 0, true) ) ); // INSTANTIATE_TEST_CASE_P(StressTest, // NVGraphCAPITests_Traversal_Stress, // ::testing::Values( // Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 0) // ) // ); int main(int argc, char **argv) { for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--perf") == 0) PERF = 1; if (strcmp(argv[i], "--stress-iters") == 0) STRESS_MULTIPLIER = atoi(argv[i + 1]); if (strcmp(argv[i], "--ref-data-dir") == 0) ref_data_prefix = std::string(argv[i + 1]); if (strcmp(argv[i], "--graph-data-dir") == 0) graph_data_prefix = std::string(argv[i + 1]); } srand(42); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
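// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original tests): because a BFS may return
// any one of several valid shortest-path trees, the checks above never compare
// predecessor arrays element-wise. Instead a predecessor p of vertex v is
// accepted iff dist[v] == dist[p] + 1, and a vertex without a predecessor must
// be either the source (dist == 0) or unreachable (dist == -1, the convention
// used by ref_bfs in this file). That rule as a standalone helper:
#include <vector>

inline bool valid_bfs_tree(const std::vector<int>& dist,
                           const std::vector<int>& pred) {
    for (size_t v = 0; v < dist.size(); ++v) {
        if (pred[v] != -1) {
            if (dist[v] != dist[pred[v]] + 1) return false;   // tree edge must advance exactly one level
        } else {
            if (dist[v] != 0 && dist[v] != -1) return false;  // only the source or unreachable vertices
        }
    }
    return true;
}
// ---------------------------------------------------------------------------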
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_test_common.h
#include <stdlib.h> /* import labs() */ #include <math.h> #include <iostream> #include <string> #if defined(_WIN32) #if !defined(WIN32_LEAN_AND_MEAN) #define WIN32_LEAN_AND_MEAN #endif #define NOMINMAX #include <windows.h> static double second (void) { LARGE_INTEGER t; static double oofreq; static int checkedForHighResTimer; static BOOL hasHighResTimer; if (!checkedForHighResTimer) { hasHighResTimer = QueryPerformanceFrequency (&t); oofreq = 1.0 / (double)t.QuadPart; checkedForHighResTimer = 1; } if (hasHighResTimer) { QueryPerformanceCounter (&t); return (double)t.QuadPart * oofreq; } else { return (double)GetTickCount() / 1000.0; } } static long long getSystemMemory() { MEMORYSTATUSEX state; // Requires >= win2k memset (&state, 0, sizeof(state)); state.dwLength = sizeof(state); if (0 == GlobalMemoryStatusEx(&state)) { return 0; } else { return (long long)state.ullTotalPhys; } } #elif defined(__linux) || defined(__powerpc64__) #include <stddef.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/sysinfo.h> static double second (void) { struct timeval tv; gettimeofday(&tv, NULL); return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; } static long long getSystemMemory(void) { struct sysinfo s_info; sysinfo (&s_info); return (long long)s_info.totalram * (long long)s_info.mem_unit; } #elif defined(__APPLE__) #include <stddef.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/types.h> #include <sys/sysctl.h> static double second (void) { struct timeval tv; gettimeofday(&tv, NULL); return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; } static long long getSystemMemory(void) { int memmib[2] = { CTL_HW, HW_MEMSIZE }; long long mem = (size_t)0; size_t memsz = sizeof(mem); /* NOTE: This may cap memory reported at 2GB */ if (sysctl(memmib, 2, &mem, &memsz, NULL, 0) == -1) { return 0; } else { return mem; } } #elif defined(__QNX__) #include <stddef.h> #include <sys/time.h> #include <sys/resource.h> static double second (void) { struct timeval tv; gettimeofday(&tv, NULL); return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; } static long long getSystemMemory(void) { return 0; } #else #error unsupported platform #endif std::string getFileName(const std::string& s) { char sep = '/'; #ifdef _WIN32 sep = '\\'; #endif size_t i = s.rfind(sep, s.length()); if (i != std::string::npos) { return(s.substr(i+1, s.length() - i)); } return(""); }
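// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original header): second() returns wall
// clock time in seconds, so the test files time an API call by synchronizing
// the device, sampling second() around a fixed number of repeats, and printing
// the mean in milliseconds in the ERIS "&&&& PERF" format. A generic version
// of that pattern, assuming the CUDA runtime and any callable under test:
#include <cuda_runtime.h>
#include <cstdio>
#include <functional>

inline void report_mean_time_ms(const char* label, int repeat,
                                const std::function<void()>& call) {
    cudaDeviceSynchronize();            // do not bill earlier asynchronous work to this measurement
    double start = second();
    for (int i = 0; i < repeat; i++)
        call();
    cudaDeviceSynchronize();            // wait for the last call to finish before stopping the clock
    double stop = second();
    printf("&&&& PERF Time_%s %10.8f -ms\n", label, 1000.0 * (stop - start) / repeat);
}
// ---------------------------------------------------------------------------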
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/2d_partitioning_test.cpp
#include "gtest/gtest.h" #include "nvgraph.h" #include <iostream> TEST(SimpleBFS2D, DummyTest) { nvgraphHandle_t handle; int* devices = (int*) malloc(sizeof(int) * 2); devices[0] = 0; devices[1] = 1; nvgraphCreateMulti(&handle, 2, devices); nvgraphGraphDescr_t graph; nvgraphCreateGraphDescr(handle, &graph); int rowIds[38] = { 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8, 8 }; int colIds[38] = { 1, 2, 7, 8, 0, 2, 4, 7, 8, 0, 1, 3, 6, 8, 2, 4, 5, 6, 8, 1, 3, 5, 8, 3, 4, 6, 7, 2, 3, 5, 0, 1, 5, 0, 1, 2, 3, 4 }; nvgraph2dCOOTopology32I_st topo; topo.nvertices = 9; topo.nedges = 38; topo.source_indices = rowIds; topo.destination_indices = colIds; topo.valueType = CUDA_R_32I; topo.values = NULL; topo.numDevices = 2; topo.devices = devices; topo.blockN = 2; topo.tag = NVGRAPH_DEFAULT; nvgraphSetGraphStructure(handle, graph, &topo, NVGRAPH_2D_32I_32I); int* distances = (int*) malloc(sizeof(int) * 9); int* predecessors = (int*) malloc(sizeof(int) * 9); int sourceId = 0; std::cout << "Source ID: " << sourceId << "\n"; nvgraph2dBfs(handle, graph, sourceId, distances, predecessors); std::cout << "Distances:\n"; for (int i = 0; i < 9; i++) std::cout << i << ":" << distances[i] << " "; std::cout << "\nPredecessors:\n"; for (int i = 0; i < 9; i++) std::cout << i << ":" << predecessors[i] << " "; std::cout << "\n"; int exp_pred[9] = {-1,0,0,2,1,7,2,0,0}; int exp_dist[9] = {0,1,1,2,2,2,2,1,1}; for (int i = 0; i < 9; i++){ ASSERT_EQ(exp_pred[i], predecessors[i]); ASSERT_EQ(exp_dist[i], distances[i]); } std::cout << "Test run!\n"; } int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_traversal.cpp
// This is gtest application that contains all of the C API tests. Parameters: // nvgraph_capi_tests [--perf] [--stress-iters N] [--gtest_filter=NameFilterPatter] // It also accepts any other gtest (1.7.0) default parameters. // Right now this application contains: // 1) Sanity Check tests - tests on simple examples with known answer (or known behaviour) // 2) Correctness checks tests - tests on real graph data, uses reference algorithm // (CPU code for SrSPMV and python scripts for other algorithms, see // python scripts here: //sw/gpgpu/nvgraph/test/ref/) with reference results, compares those two. // It also measures performance of single algorithm C API call, enf enabled (see below) // 3) Corner cases tests - tests with some bad inputs, bad parameters, expects library to handle // it gracefully // 4) Stress tests - makes sure that library result is persistent throughout the library usage // (a lot of C API calls). Also makes some assumptions and checks on memory usage during // this test. // // We can control what tests to launch by using gtest filters. For example: // Only sanity tests: // ./nvgraph_capi_tests_traversal --gtest_filter=*Sanity* // And, correspondingly: // ./nvgraph_capi_tests_traversal --gtest_filter=*Correctness* // ./nvgraph_capi_tests_traversal --gtest_filter=*Corner* // ./nvgraph_capi_tests_traversal --gtest_filter=*Stress* // Or, combination: // ./nvgraph_capi_tests_traversal --gtest_filter=*Sanity*:*Correctness* // // Performance reports are provided in the ERIS format and disabled by default. // Could be enabled by adding '--perf' to the command line. I added this parameter to vlct // // Parameter '--stress-iters N', which gives multiplier (not an absolute value) for the number of launches for stress tests // #include <utility> #include "gtest/gtest.h" #include "nvgraph_test_common.h" #include "valued_csr_graph.hxx" #include "readMatrix.hxx" #include "nvgraphP.h" #include "nvgraph.h" #include <nvgraph_experimental.h> // experimental header, contains hidden API entries, can be shared only under special circumstances without reveling internal things #include "stdlib.h" #include <algorithm> #include <numeric> #include <queue> // do the perf measurements, enabled by command line parameter '--perf' static int PERF = 0; // minimum vertices in the graph to perform perf measurements #define PERF_ROWS_LIMIT 10000 // number of repeats = multiplier/num_vertices #define Traversal_ITER_MULTIPLIER 30000000 template <typename T> struct nvgraph_Const; template <> struct nvgraph_Const<int> { static const cudaDataType_t Type = CUDA_R_32I; static const int inf; }; const int nvgraph_Const<int>::inf = INT_MAX; static std::string ref_data_prefix = ""; static std::string graph_data_prefix = ""; // iterations for stress tests = this multiplier * iterations for perf tests static int STRESS_MULTIPLIER = 10; bool enough_device_memory(int n, int nnz, size_t add) { size_t mtotal, mfree; cudaMemGetInfo(&mfree, &mtotal); if (mfree > add + sizeof(int)*(4*n)) //graph + pred + distances + 2n (working data) return true; return false; } std::string convert_to_local_path(const std::string& in_file) { std::string wstr = in_file; if ((wstr != "dummy") & (wstr != "")) { std::string prefix; if (graph_data_prefix.length() > 0) { prefix = graph_data_prefix; } else { #ifdef _WIN32 //prefix = "C:\\mnt\\eris\\test\\matrices_collection\\"; prefix = "Z:\\matrices_collection\\"; std::replace(wstr.begin(), wstr.end(), '/', '\\'); #else prefix = "/mnt/nvgraph_test_data/"; #endif } wstr = prefix + wstr; } return 
wstr; } void ref_bfs(int n, int nnz, int *rowPtr, int *colInd, int *mask, int source_vertex, int *distances) { for(int i=0; i!=n; ++i) distances[i] = INT_MAX; std::queue<int> q; q.push(source_vertex); distances[source_vertex] = 0; while(!q.empty()) { int u = q.front(); q.pop(); for(int iCol = rowPtr[u]; iCol != rowPtr[u+1]; ++iCol) { if(mask && !mask[iCol]) continue; int v = colInd[iCol]; if(distances[v] == INT_MAX) { //undiscovered distances[v] = distances[u] + 1; q.push(v); } } } } typedef struct Traversal_Usecase_t { std::string graph_file; int source_vert; bool useMask; bool undirected; Traversal_Usecase_t(const std::string& a, int b, bool _useMask=false, bool _undirected=false) : source_vert(b), useMask(_useMask), undirected(_undirected) { graph_file = convert_to_local_path(a); }; Traversal_Usecase_t& operator=(const Traversal_Usecase_t& rhs) { graph_file = rhs.graph_file; source_vert = rhs.source_vert; useMask = rhs.useMask; return *this; } } Traversal_Usecase; //// Traversal tests class NVGraphCAPITests_Traversal : public ::testing::TestWithParam<Traversal_Usecase> { public: NVGraphCAPITests_Traversal() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename EdgeT> void run_current_test(const Traversal_Usecase& param) { const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.source_vert; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + std::string("_") + ss.str().c_str(); nvgraphTopologyType_t topo = NVGRAPH_CSR_32; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a network in amgx binary format ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<EdgeT> csr_read_val(nnz); ASSERT_EQ(read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, csr_read_val), 0); fclose(fpin); std::vector<int> csr_mask(nnz, 1); if(param.useMask) { //Generating a mask //Should be improved for(int i=0; i < nnz; i += 2) csr_mask[i] = 0; } if (!enough_device_memory(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSRTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); // set up graph data size_t numsets_v = 2, numsets_e = param.useMask ? 
1 : 0; std::vector<int> calculated_distances_res(n); std::vector<int> calculated_predecessors_res(n); //void* vertexptr[1] = {(void*)&calculated_res[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<int>::Type, nvgraph_Const<int>::Type}; cudaDataType_t type_e[1] = {nvgraph_Const<int>::Type}; status = nvgraphAllocateVertexData(handle, g1, numsets_v, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); if(param.useMask) { status = nvgraphAllocateEdgeData(handle, g1, numsets_e, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } int source_vert = param.source_vert; nvgraphTraversalParameter_t traversal_param; nvgraphTraversalParameterInit(&traversal_param); nvgraphTraversalSetDistancesIndex(&traversal_param, 0); nvgraphTraversalSetPredecessorsIndex(&traversal_param, 1); nvgraphTraversalSetUndirectedFlag(&traversal_param, param.undirected); if(param.useMask) { //if we need to use a mask //Copying mask into graph status = nvgraphSetEdgeData(handle, g1, &csr_mask[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphTraversalSetEdgeMaskIndex(&traversal_param, 0); } status = nvgraphTraversal(handle, g1, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDeviceSynchronize(); if (PERF && n > PERF_ROWS_LIMIT) { double start, stop; start = second(); int repeat = 30; for (int i = 0; i < repeat; i++) { status = nvgraphTraversal(handle, g1, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } cudaDeviceSynchronize(); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0*(stop-start)/repeat); } ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // get result status = nvgraphGetVertexData(handle, g1, (void *)&calculated_distances_res[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(handle, g1, (void *)&calculated_predecessors_res[0], 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // check with reference std::vector<int> expected_distances_res(n); ref_bfs(n, nnz, &read_row_ptr[0], &read_col_ind[0], &csr_mask[0], source_vert, &expected_distances_res[0]); //Checking distances for (int i = 0; i < n; ++i) { ASSERT_EQ(expected_distances_res[i], calculated_distances_res[i]) << "Wrong distance from source in row #" << i << " graph " << param.graph_file << " source_vert=" << source_vert<< "\n" ; } //Checking predecessors for (int i = 0; i < n; ++i) { if(calculated_predecessors_res[i] != -1) { ASSERT_EQ(expected_distances_res[i], expected_distances_res[calculated_predecessors_res[i]] + 1) << "Wrong predecessor in row #" << i << " graph " << param.graph_file << " source_vert=" << source_vert<< "\n" ; } else { ASSERT_TRUE(expected_distances_res[i] == 0 || expected_distances_res[i] == INT_MAX) << "Wrong predecessor in row #" << i << " graph " << param.graph_file << " source_vert=" << source_vert<< "\n" ; } } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_Traversal, CheckResult) { run_current_test<float>(GetParam()); } /// Few sanity checks. 
class NVGraphCAPITests_Traversal_Sanity : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_Traversal_Sanity() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSR_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename EdgeT> void prepare_and_run(const nvgraphCSRTopology32I_st& topo_st, int* expected ) { g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph n = topo_st.nvertices; nnz = topo_st.nedges; status = nvgraphSetGraphStructure(handle, g1, (void*)&topo_st, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[1] = {nvgraph_Const<int>::Type}; status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int source_vert = 0; int traversal_distances_index = 0; nvgraphTraversalParameter_t traversal_param; nvgraphTraversalParameterInit(&traversal_param); nvgraphTraversalSetDistancesIndex(&traversal_param, traversal_distances_index); status = nvgraphTraversal(handle, g1, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // get result std::vector<int> calculated_res(n); status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res[0], traversal_distances_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { int reference_res = (int)expected[row]; int nvgraph_res = (int)calculated_res[row]; ASSERT_EQ(reference_res, nvgraph_res); } status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // cycle graph, shortest path = vertex number template <typename EdgeT> void run_cycle_test() { n = 1024; nnz = n; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i] = i; neighborhood[i] = (i + 1) % n; } offsets[n] = n; std::vector<int> expected_res(n, nvgraph_Const<int>::inf); for (int i = 0; i < n; i++) { expected_res[i] = i; } nvgraphCSRTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; prepare_and_run<EdgeT>(topology, &expected_res[0]); } }; TEST_F(NVGraphCAPITests_Traversal_Sanity, SanityCycle) { run_cycle_test<float>(); } class NVGraphCAPITests_Traversal_CornerCases : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; int n; int nnz; nvgraphGraphDescr_t g1; NVGraphCAPITests_Traversal_CornerCases() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { topo = NVGRAPH_CSR_32; nvgraphStatus_t status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } template <typename EdgeT> void run_cycle_test() { n = 1024; nnz = n; std::vector<int> offsets(n+1), neighborhood(n); for (int i = 0; i < n; i++) { offsets[i] = i; neighborhood[i] = (i + 1) % n; } offsets[n] = n; nvgraphCSRTopology32I_st topology = {n, nnz, &offsets[0], &neighborhood[0]}; int source_vert = 0; int traversal_distances_index = 0; g1 = NULL; 
status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // only multivaluedCSR are supported nvgraphTraversalParameter_t traversal_param; nvgraphTraversalParameterInit(&traversal_param); nvgraphTraversalSetDistancesIndex(&traversal_param, traversal_distances_index); status = nvgraphTraversal(handle, g1, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); cudaDataType_t type_v[1] = {nvgraph_Const<int>::Type}; status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphTraversal(NULL, g1, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphTraversal(handle, NULL, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphTraversal(handle, g1, NVGRAPH_TRAVERSAL_BFS, NULL, traversal_param); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // only CSR is supported { status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSC_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData(handle, g1, 1, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphTraversalParameterInit(&traversal_param); nvgraphTraversalSetDistancesIndex(&traversal_param, traversal_distances_index); status = nvgraphTraversal(handle, g1, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } }; TEST_F(NVGraphCAPITests_Traversal_CornerCases, CornerCases) { run_cycle_test<float>(); } class NVGraphCAPITests_Traversal_Stress : public ::testing::TestWithParam<Traversal_Usecase> { public: NVGraphCAPITests_Traversal_Stress() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { //const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); //printf("We are in test %s of test case %s.\n", test_info->name(), test_info->test_case_name()); if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename EdgeT> void run_current_test(const Traversal_Usecase& param) { nvgraphTopologyType_t topo = NVGRAPH_CSR_32; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a network in amgx binary format and the bookmark of dangling nodes ASSERT_EQ(read_header_amgx_csr_bin (fpin, n, nnz), 0); std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<EdgeT> read_val(nnz); ASSERT_EQ(read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val), 0); fclose(fpin); nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph 
nvgraphCSRTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); std::vector<int> calculated_res(n); // set up graph data //size_t numsets = 1; //cudaDataType_t type_v[1] = {nvgraph_Const<int>::Type}; size_t numsets = 2; cudaDataType_t type_v[2] = {nvgraph_Const<int>::Type, nvgraph_Const<int>::Type}; status = nvgraphAllocateVertexData(handle, g1, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int source_vert = param.source_vert; int traversal_distances_index = 0; int traversal_predecessors_index = 1; // run int repeat = 2;//std::max((int)(((float)(Traversal_ITER_MULTIPLIER)*STRESS_MULTIPLIER)/(3*n)), 1); std::vector<int> calculated_res1(n), calculated_res_mid1(n), calculated_res_last(n); std::vector<int> calculated_res2(n), calculated_res_mid2(n); size_t free_mid = 0, free_last = 0, total = 0; for (int i = 0; i < repeat; i++) { nvgraphTraversalParameter_t traversal_param; nvgraphTraversalParameterInit(&traversal_param); nvgraphTraversalSetPredecessorsIndex(&traversal_param, 1); nvgraphTraversalSetUndirectedFlag(&traversal_param, param.undirected); nvgraphTraversalSetDistancesIndex(&traversal_param, traversal_distances_index); status = nvgraphTraversal(handle, g1, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // all of those should be equal if (i == 0) { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res1[0], traversal_distances_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res2[0], traversal_predecessors_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } else { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res_mid1[0], traversal_distances_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res_mid2[0], traversal_predecessors_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for (int row = 0; row < n; row++) { ASSERT_EQ(calculated_res1[row], calculated_res_mid1[row]) << "Difference in result in distances for row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; // predecessors could be different since multiple shortest paths are possible //ASSERT_EQ(calculated_res2[row], calculated_res_mid2[row]) << "Difference in result in predecessors for row #" << row << " graph " << param.graph_file << " for iterations #0 and iteration #" << i; } } if (i == std::min(50, (int)(repeat/2))) { cudaMemGetInfo(&free_mid, &total); } if (i == repeat-1) { status = nvgraphGetVertexData(handle, g1, (void *)&calculated_res_last[0], traversal_distances_index); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cudaMemGetInfo(&free_last, &total); } } ASSERT_LE(free_mid, free_last) << "Memory difference between iteration #" << std::min(50, (int)(repeat/2)) << " and last iteration is " << (double)(free_last-free_mid)/1e+6 << "MB"; status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_Traversal_Stress, Stress) { run_current_test<float>(GetParam()); } // instatiation of the performance/correctness checks INSTANTIATE_TEST_CASE_P(CorrectnessCheck, NVGraphCAPITests_Traversal, // graph FILE source vert # file with expected result (in binary?) 
::testing::Values( Traversal_Usecase("graphs/cage/cage13_T.mtx.bin", 0) , Traversal_Usecase("graphs/cage/cage13_T.mtx.bin", 10) , Traversal_Usecase("graphs/cage/cage14_T.mtx.bin", 0) , Traversal_Usecase("graphs/cage/cage14_T.mtx.bin", 10) , Traversal_Usecase("graphs/small/small.bin", 0) , Traversal_Usecase("graphs/small/small.bin", 0) , Traversal_Usecase("graphs/small/small.bin", 3) , Traversal_Usecase("graphs/dblp/dblp.bin", 0, false, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 100, false, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 1000, false, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 100000, false, true) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 0) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 100) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 10000) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 100000) , Traversal_Usecase("graphs/Wikipedia/2011/wiki2011.bin", 1) , Traversal_Usecase("graphs/Wikipedia/2011/wiki2011.bin", 1000) //, Traversal_Usecase("graphs/citPatents/cit-Patents_T.mtx.bin", 6543, "") //, Traversal_Usecase("dimacs10/kron_g500-logn20_T.mtx.bin", 100000, "") //, Traversal_Usecase("dimacs10/hugetrace-00020_T.mtx.bin", 100000, "") //, Traversal_Usecase("dimacs10/delaunay_n24_T.mtx.bin", 100000, "") , Traversal_Usecase("dimacs10/road_usa_T.mtx.bin", 100) , Traversal_Usecase("graphs/Twitter/twitter.bin", 0) , Traversal_Usecase("graphs/Twitter/twitter.bin", 100) , Traversal_Usecase("graphs/Twitter/twitter.bin", 10000) , Traversal_Usecase("graphs/Twitter/twitter.bin", 3000000) //, Traversal_Usecase("dimacs10/hugebubbles-00020_T.mtx.bin", 100000) ///// instances using mask , Traversal_Usecase("graphs/small/small.bin", 0, true) , Traversal_Usecase("graphs/small/small.bin", 0, true) , Traversal_Usecase("graphs/small/small.bin", 3, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 0, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 100, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 1000, true) , Traversal_Usecase("graphs/dblp/dblp.bin", 100000, true) , Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 0, true) ) ); INSTANTIATE_TEST_CASE_P(StressTest, NVGraphCAPITests_Traversal_Stress, ::testing::Values( Traversal_Usecase("graphs/Wikipedia/2003/wiki2003.bin", 0) ) ); int main(int argc, char **argv) { for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--perf") == 0) PERF = 1; if (strcmp(argv[i], "--stress-iters") == 0) STRESS_MULTIPLIER = atoi(argv[i+1]); if (strcmp(argv[i], "--ref-data-dir") == 0) ref_data_prefix = std::string(argv[i+1]); if (strcmp(argv[i], "--graph-data-dir") == 0) graph_data_prefix = std::string(argv[i+1]); } srand(42); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
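// --- Illustrative sketch (not part of the original test file) ---
// The correctness and stress tests above drive nvgraphTraversal through many large
// graphs, but the underlying call sequence is compact. The standalone sketch below
// distills it for a 4-vertex directed cycle (0->1->2->3->0). It only uses API entry
// points that already appear in the tests (nvgraphCreate, nvgraphSetGraphStructure,
// nvgraphAllocateVertexData, nvgraphTraversalParameterInit, nvgraphTraversal,
// nvgraphGetVertexData); the tiny graph and the function name bfs_distances_sketch
// are illustrative assumptions, and error handling is reduced to early returns.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include "nvgraph.h"

int bfs_distances_sketch() {
    nvgraphHandle_t handle = NULL;
    nvgraphGraphDescr_t graph = NULL;
    if (nvgraphCreate(&handle) != NVGRAPH_STATUS_SUCCESS) return 1;
    if (nvgraphCreateGraphDescr(handle, &graph) != NVGRAPH_STATUS_SUCCESS) return 1;

    // CSR topology of the cycle: row i holds a single edge to (i + 1) % n.
    int n = 4, nnz = 4;
    int offsets[]   = {0, 1, 2, 3, 4};
    int neighbors[] = {1, 2, 3, 0};
    nvgraphCSRTopology32I_st topology = {n, nnz, offsets, neighbors};
    nvgraphSetGraphStructure(handle, graph, (void*)&topology, NVGRAPH_CSR_32);

    // One 32-bit integer vertex data set receives the BFS distances.
    cudaDataType_t type_v[1] = {CUDA_R_32I};
    nvgraphAllocateVertexData(handle, graph, 1, type_v);

    int source_vert = 0;
    nvgraphTraversalParameter_t traversal_param;
    nvgraphTraversalParameterInit(&traversal_param);
    nvgraphTraversalSetDistancesIndex(&traversal_param, 0);
    nvgraphTraversal(handle, graph, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param);

    // Copy distances back to the host; a directed cycle yields 0, 1, 2, 3.
    std::vector<int> distances(n);
    nvgraphGetVertexData(handle, graph, (void*)&distances[0], 0);
    for (int i = 0; i < n; ++i) printf("distance[%d] = %d\n", i, distances[i]);

    nvgraphDestroyGraphDescr(handle, graph);
    nvgraphDestroy(handle);
    return 0;
}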
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_triangles.cpp
// This is gtest application that contains all of the C API tests. Parameters: // nvgraph_capi_tests [--perf] [--stress-iters N] [--gtest_filter=NameFilterPatter] // It also accepts any other gtest (1.7.0) default parameters. // Right now this application contains: // 1) Sanity Check tests - tests on simple examples with known answer (or known behaviour) // 2) Correctness checks tests - tests on real graph data, uses reference algorithm // (CPU code for SrSPMV and python scripts for other algorithms, see // python scripts here: //sw/gpgpu/nvgraph/test/ref/) with reference results, compares those two. // It also measures performance of single algorithm C API call, enf enabled (see below) // 3) Corner cases tests - tests with some bad inputs, bad parameters, expects library to handle // it gracefully // 4) Stress tests - makes sure that library result is persistent throughout the library usage // (a lot of C API calls). Also makes some assumptions and checks on memory usage during // this test. // // We can control what tests to launch by using gtest filters. For example: // Only sanity tests: // ./nvgraph_capi_tests --gtest_filter=*Sanity* // And, correspondingly: // ./nvgraph_capi_tests --gtest_filter=*Correctness* // ./nvgraph_capi_tests --gtest_filter=*Corner* // ./nvgraph_capi_tests --gtest_filter=*Stress* // Or, combination: // ./nvgraph_capi_tests --gtest_filter=*Sanity*:*Correctness* // // Performance reports are provided in the ERIS format and disabled by default. // Could be enabled by adding '--perf' to the command line. I added this parameter to vlct // // Parameter '--stress-iters N', which gives multiplier (not an absolute value) for the number of launches for stress tests // #include <utility> #include "gtest/gtest.h" #include "nvgraph_test_common.h" #include "valued_csr_graph.hxx" #include "readMatrix.hxx" #include "nvgraphP.h" #include "nvgraph.h" #include <nvgraph_experimental.h> // experimental header, contains hidden API entries, can be shared only under special circumstances without reveling internal things #include "stdlib.h" #include "stdint.h" #include <algorithm> // do the perf measurements, enabled by command line parameter '--perf' static int PERF = 0; // minimum vertices in the graph to perform perf measurements #define PERF_ROWS_LIMIT 10000 static int complex_repeats = 20; static std::string ref_data_prefix = ""; static std::string graph_data_prefix = ""; template <typename T> struct comparison { bool operator() (T* lhs, T* rhs) {return (*lhs) < (*rhs);} }; template <typename T> bool enough_device_memory(int n, int nnz, size_t add) { size_t mtotal, mfree; cudaMemGetInfo(&mfree, &mtotal); if (mfree > add + sizeof(T)*3*(n + nnz)) return true; return false; } std::string convert_to_local_path(const std::string& in_file) { std::string wstr = in_file; if ((wstr != "dummy") & (wstr != "")) { std::string prefix; if (graph_data_prefix.length() > 0) { prefix = graph_data_prefix; } else { #ifdef _WIN32 //prefix = "C:\\mnt\\eris\\test\\matrices_collection\\"; prefix = "\\\\cuda-vnetapp\\eris_matrices\\"; std::replace(wstr.begin(), wstr.end(), '/', '\\'); #else prefix = "/mnt/nvgraph_test_data/"; #endif } wstr = prefix + wstr; } return wstr; } class NVGraphCAPITests_Triangles_Sanity : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphTopologyType_t topo; nvgraphGraphDescr_t g1; NVGraphCAPITests_Triangles_Sanity() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { nvgraphStatus_t 
status; if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } void prepare_and_run(const void* topo_st, bool lower_triangular, uint64_t expected ) { g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph status = nvgraphSetGraphStructure(handle, g1, (void*)topo_st, topo); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); uint64_t res = 0; status = nvgraphTriangleCount(handle, g1, &res); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //printf("Expected triangles: %" PRIu64 ", got triangles: %" PRIu64 "\n", expected, res); // get result ASSERT_EQ(expected, res); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } void run_star_test_csr() { int N = 1024; // min is 5 int n = N - 1; int nnz = 2 * (N - 1) ; std::vector<int> offsets(N+1), neighborhood(nnz); offsets[0] = 0; offsets[1] = 0; int cur_nnz = 0; for (int i = 1; i < N; i++) { for (int j = 0; j < i; j++) { if (j == 0 || j == i - 1 || (j == 1 && i == (N-1))) { neighborhood[cur_nnz] = j; cur_nnz++; } } offsets[i+1] = cur_nnz; } //offsets[n] = cur_nnz; /*printf("N = %d, n = %d, nnz = %d\n", N, n, nnz); for (int i = 0; i < N+1; i++) printf("RO [%d] == %d\n", i, offsets[i]); for (int i = 0; i < nnz; i++) printf("CI [%d] == %d\n", i, neighborhood[i]);*/ topo = NVGRAPH_CSR_32; nvgraphCSRTopology32I_st topology = {N, nnz, &offsets[0], &neighborhood[0]}; prepare_and_run((void*)&topology, true, n); } void run_seq_test_csr() { int N = 1024; // min is 3 int n = N - 2; // actual number of triangles int nnz = 2 * (N - 3) + 3; std::vector<int> offsets(N+1), neighborhood(nnz); offsets[0] = 0; int cur_nnz = 0; for (int i = 0; i < N; i++) { if (i > 1) { neighborhood[cur_nnz] = i - 2; cur_nnz++; } if (i > 0) { neighborhood[cur_nnz] = i - 1; cur_nnz++; } offsets[i+1] = cur_nnz; } //offsets[n] = cur_nnz; /*printf("N = %d, n = %d, nnz = %d\n", N, n, nnz); for (int i = 0; i < N+1; i++) printf("RO [%d] == %d\n", i, offsets[i]); for (int i = 0; i < nnz; i++) printf("CI [%d] == %d\n", i, neighborhood[i]);*/ topo = NVGRAPH_CSR_32; nvgraphCSRTopology32I_st topology = {N, nnz, &offsets[0], &neighborhood[0]}; prepare_and_run((void*)&topology, true, n); } }; typedef struct TriCount_Usecase_t { std::string graph_file; uint64_t ref_tricount; TriCount_Usecase_t(const std::string& a, uint64_t b) : ref_tricount(b) { graph_file = convert_to_local_path(a); }; TriCount_Usecase_t& operator=(const TriCount_Usecase_t& rhs) { graph_file = rhs.graph_file; ref_tricount = rhs.ref_tricount; return *this; } } TriCount_Usecase_t; class TriCountRefGraphCheck : public ::testing::TestWithParam<TriCount_Usecase_t> { public: TriCountRefGraphCheck() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; void run_current_test(const TriCount_Usecase_t& param) { const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + 
std::string("_") + getFileName(param.graph_file); nvgraphTopologyType_t topo = NVGRAPH_CSR_32; nvgraphStatus_t status; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes std::vector<int> read_row_ptr, read_col_ind; ASSERT_EQ(read_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind), 0); fclose(fpin); if (!enough_device_memory<char>(n, nnz, sizeof(int)*(read_row_ptr.size() + read_col_ind.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSRTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo); uint64_t res = 0; status = nvgraphTriangleCount(handle, g1, &res); cudaDeviceSynchronize(); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // run if (PERF && n > PERF_ROWS_LIMIT) { double start, stop; start = second(); start = second(); int repeat = complex_repeats; for (int i = 0; i < repeat; i++) { status = nvgraphTriangleCount(handle, g1, &res); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } cudaDeviceSynchronize(); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0*(stop-start)/repeat); } //printf("Expected triangles: %" PRIu64 ", got triangles: %" PRIu64 "\n", expected, res); ASSERT_EQ(param.ref_tricount, res); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(TriCountRefGraphCheck, CorrectnessCheck) { run_current_test(GetParam()); } INSTANTIATE_TEST_CASE_P(NVGraphCAPITests_TriCount, TriCountRefGraphCheck, // graph FILE reference number of triangles // // we read matrix stored in CSR and pass it as CSC - so matrix is in fact transposed, that's why we compare it to the results calculated on a transposed matrix ::testing::Values( TriCount_Usecase_t("graphs/triangles_counting/as-skitter_internet_topo.csr.bin" , 28769868) , TriCount_Usecase_t("graphs/triangles_counting/cage15_N_5154859.csr.bin" , 36106416 ) , TriCount_Usecase_t("graphs/triangles_counting/cit-Patents_N_3774768.csr.bin" , 7515023) , TriCount_Usecase_t("graphs/triangles_counting/coAuthorsCiteseer_N_227320.csr.bin" , 2713298) , TriCount_Usecase_t("graphs/triangles_counting/com-orkut_N_3072441.csr.bin" , 627584181) , TriCount_Usecase_t("graphs/triangles_counting/coPapersCiteseer.csr.bin" , 872040567) , TriCount_Usecase_t("graphs/triangles_counting/coPapersDBLP_N_540486.csr.bin" , 444095058) , TriCount_Usecase_t("graphs/triangles_counting/europe_osm_N_50912018.csr.bin" , 61710) , TriCount_Usecase_t("graphs/triangles_counting/hollywood-2009_N_1139905.csr.bin" , 4916374555) , TriCount_Usecase_t("graphs/triangles_counting/kron_g500-simple-logn16.csr.bin" , 118811321) , TriCount_Usecase_t("graphs/triangles_counting/kron_g500-simple-logn18.csr.bin" , 687677667) , TriCount_Usecase_t("graphs/triangles_counting/kron_g500-simple-logn21.csr.bin" , 8815649682) , TriCount_Usecase_t("graphs/triangles_counting/mouse_gene_N_45101.csr.bin" , 3619097862) , TriCount_Usecase_t("graphs/triangles_counting/road_central_N_14081816.csr.bin" , 228918) , TriCount_Usecase_t("graphs/triangles_counting/soc-LiveJournal1_N_4847571.csr.bin" , 285730264) , 
TriCount_Usecase_t("graphs/triangles_counting/wb-edu_N_9845725.csr.bin" , 254718147) ///// more instances ) ); TEST_F(NVGraphCAPITests_Triangles_Sanity, SanityStarCSR) { run_star_test_csr(); } TEST_F(NVGraphCAPITests_Triangles_Sanity, SanitySeqCSR) { run_seq_test_csr(); } int main(int argc, char **argv) { for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--perf") == 0) PERF = 1; if (strcmp(argv[i], "--ref-data-dir") == 0) ref_data_prefix = std::string(argv[i+1]); if (strcmp(argv[i], "--graph-data-dir") == 0) graph_data_prefix = std::string(argv[i+1]); } srand(42); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_benchmark.cpp
// This is gtest application that contains all of the C API tests. Parameters: // nvgraph_capi_tests [--perf] [--stress-iters N] [--gtest_filter=NameFilterPatter] // It also accepts any other gtest (1.7.0) default parameters. // Right now this application contains: // 1) Sanity Check tests - tests on simple examples with known answer (or known behaviour) // 2) Correctness checks tests - tests on real graph data, uses reference algorithm // (CPU code for SrSPMV and python scripts for other algorithms, see // python scripts here: //sw/gpgpu/nvgraph/test/ref/) with reference results, compares those two. // It also measures performance of single algorithm C API call, enf enabled (see below) // 3) Corner cases tests - tests with some bad inputs, bad parameters, expects library to handle // it gracefully // 4) Stress tests - makes sure that library result is persistent throughout the library usage // (a lot of C API calls). Also makes some assumptions and checks on memory usage during // this test. // // We can control what tests to launch by using gtest filters. For example: // Only sanity tests: // ./nvgraph_capi_tests --gtest_filter=*Sanity* // And, correspondingly: // ./nvgraph_capi_tests --gtest_filter=*Correctness* // ./nvgraph_capi_tests --gtest_filter=*Corner* // ./nvgraph_capi_tests --gtest_filter=*Stress* // Or, combination: // ./nvgraph_capi_tests --gtest_filter=*Sanity*:*Correctness* // // Performance reports are provided in the ERIS format and disabled by default. // Could be enabled by adding '--perf' to the command line. I added this parameter to vlct // // Parameter '--stress-iters N', which gives multiplier (not an absolute value) for the number of launches for stress tests // #include <utility> #include "nvgraph_test_common.h" #include "valued_csr_graph.hxx" #include "readMatrix.hxx" #include "nvgraphP.h" #include "nvgraph.h" #include "nvgraph_experimental.h" #include "stdlib.h" #include "stdint.h" #include <algorithm> extern "C" { #include "mmio.h" } #include "mm.hxx" // minimum vertices in the graph to perform perf measurements #define PERF_ROWS_LIMIT 10000 // number of repeats = multiplier/num_vertices #define SRSPMV_ITER_MULTIPLIER 1000000000 #define SSSP_ITER_MULTIPLIER 30000000 #define WIDEST_ITER_MULTIPLIER 30000000 #define PAGERANK_ITER_MULTIPLIER 300000000 // utility #define NVGRAPH_SAFE_CALL(call) \ {\ nvgraphStatus_t status = (call) ;\ if ( NVGRAPH_STATUS_SUCCESS != status )\ {\ std::cout << "Error #" << status << " in " << __FILE__ << ":" << __LINE__ << std::endl;\ exit(1);\ }\ } #define CUDA_SAFE_CALL(call) \ {\ cudaError_t status = (call) ;\ if ( cudaSuccess != status )\ {\ std::cout << "Error #" << status << " in " << __FILE__ << ":" << __LINE__ << std::endl;\ exit(1);\ }\ } template <typename T> struct nvgraph_Const; template <> struct nvgraph_Const<double> { static const cudaDataType_t Type = CUDA_R_64F; static const double inf; static const double tol; typedef union fpint { double f; unsigned long u; } fpint_st; }; const double nvgraph_Const<double>::inf = DBL_MAX; const double nvgraph_Const<double>::tol = 1e-6; // this is what we use as a tolerance in the algorithms, more precision than this is useless for CPU reference comparison template <> struct nvgraph_Const<float> { static const cudaDataType_t Type = CUDA_R_32F; static const float inf; static const float tol; typedef union fpint { float f; unsigned u; } fpint_st; }; const float nvgraph_Const<float>::inf = FLT_MAX; const float nvgraph_Const<float>::tol = 1e-4; template <> struct nvgraph_Const<int> { 
static const cudaDataType_t Type = CUDA_R_32I; static const int inf; static const int tol; }; const int nvgraph_Const<int>::inf = INT_MAX; const int nvgraph_Const<int>::tol = 0; typedef struct SrSPMV_Usecase_t { std::string graph_file; int repeats; SrSPMV_Usecase_t(const std::string& a, const int b) : graph_file(a), repeats(b){}; SrSPMV_Usecase_t& operator=(const SrSPMV_Usecase_t& rhs) { graph_file = rhs.graph_file; repeats = rhs.repeats; return *this; } } SrSPMV_Usecase; template <typename T> void run_srspmv_bench(const SrSPMV_Usecase& param) { std::cout << "Initializing nvGRAPH library..." << std::endl; nvgraphHandle_t handle = NULL; if (handle == NULL) { NVGRAPH_SAFE_CALL(nvgraphCreate(&handle)); } std::cout << "Reading input data..." << std::endl; FILE* fpin = fopen(param.graph_file.c_str(),"r"); if (fpin == NULL) { std::cout << "Cannot open input graph file: " << param.graph_file << std::endl; exit(1); } int m, n, nnz; MM_typecode mc; if(mm_properties<int>(fpin, 1, &mc, &m, &n, &nnz) != 0) { std::cout << "could not read Matrix Market file properties"<< "\n"; exit(1); } std::vector<int> read_row_ptr(n+1), read_col_ind(nnz), coo_row_ind(nnz); std::vector<T> csr_read_val(nnz); if(mm_to_coo<int,T>(fpin, 1, nnz, &coo_row_ind[0], &read_col_ind[0], &csr_read_val[0], NULL)) { std::cout << "could not read matrix data"<< "\n"; exit(1); } if(coo_to_csr<int,T> (n, n, nnz, &coo_row_ind[0], &read_col_ind[0], &csr_read_val[0], NULL, &read_row_ptr[0], NULL, NULL, NULL)) { std::cout << "could not covert COO to CSR "<< "\n"; exit(1); } //Read a transposed network in amgx binary format and the bookmark of dangling nodes /*if (read_header_amgx_csr_bin (fpin, n, nnz) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); } std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); if (read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); }*/ fclose(fpin); std::cout << "Initializing data structures ..." 
<< std::endl; nvgraphGraphDescr_t g1 = NULL; NVGRAPH_SAFE_CALL(nvgraphCreateGraphDescr(handle, &g1)); // set up graph nvgraphTopologyType_t topo = NVGRAPH_CSR_32; nvgraphCSRTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; NVGRAPH_SAFE_CALL(nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo)); // set up graph data std::vector<T> calculated_res(n); std::vector<T> data1(n), data2(n); for (int i = 0; i < n; i++) { data1[i] = (T)(1.0*rand()/RAND_MAX - 0.5); data2[i] = (T)(1.0*rand()/RAND_MAX - 0.5); //printf ("data1[%d]==%f, data2[%d]==%f\n", i, data1[i], i, data2[i]); } void* vertexptr[2] = {(void*)&data1[0], (void*)&data2[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&csr_read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; int weight_index = 0; int x_index = 0; int y_index = 1; NVGRAPH_SAFE_CALL(nvgraphAllocateVertexData(handle, g1, 2, type_v )); NVGRAPH_SAFE_CALL(nvgraphSetVertexData(handle, g1, vertexptr[0], x_index )); NVGRAPH_SAFE_CALL(nvgraphSetVertexData(handle, g1, vertexptr[1], y_index )); NVGRAPH_SAFE_CALL(nvgraphAllocateEdgeData(handle, g1, 1, type_e)); NVGRAPH_SAFE_CALL(nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], weight_index )); // run double start, stop, total = 0.; T alphaT = 1., betaT = 0.; nvgraphSemiring_t sr = NVGRAPH_PLUS_TIMES_SR; int repeat = std::max(param.repeats, 1); NVGRAPH_SAFE_CALL(nvgraphSrSpmv(handle, g1, weight_index, (void*)&alphaT, x_index, (void*)&betaT, y_index, sr)); NVGRAPH_SAFE_CALL(nvgraphSrSpmv(handle, g1, weight_index, (void*)&alphaT, x_index, (void*)&betaT, y_index, sr)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); std::cout << "Running spmv for " << repeat << " times..." << std::endl; std::cout << "n = " << n << ", nnz = " << nnz << std::endl; for (int i = 0; i < repeat; i++) { start = second(); start = second(); NVGRAPH_SAFE_CALL(nvgraphSrSpmv(handle, g1, weight_index, (void*)&alphaT, x_index, (void*)&betaT, y_index, sr)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); stop = second(); total += stop - start; } std::cout << "nvgraph time = " << 1000.*total/((double)repeat) << std::endl; NVGRAPH_SAFE_CALL(nvgraphDestroyGraphDescr(handle, g1)); if (handle != NULL) { NVGRAPH_SAFE_CALL(nvgraphDestroy(handle)); handle = NULL; } } typedef struct WidestPath_Usecase_t { std::string graph_file; int source_vert; int repeats; WidestPath_Usecase_t(const std::string& a, int b, const int c) : graph_file(a), source_vert(b), repeats(c){}; WidestPath_Usecase_t& operator=(const WidestPath_Usecase_t& rhs) { graph_file = rhs.graph_file; source_vert = rhs.source_vert; repeats = rhs.repeats; return *this; } } WidestPath_Usecase; // ref functions taken from cuSparse template <typename T_ELEM> void ref_csr2csc (int m, int n, int nnz, const T_ELEM *csrVals, const int *csrRowptr, const int *csrColInd, T_ELEM *cscVals, int *cscRowind, int *cscColptr, int base=0){ int i,j, row, col, index; int * counters; T_ELEM val; /* early return */ if ((m <= 0) || (n <= 0) || (nnz <= 0)){ return; } /* build compressed column pointers */ memset(cscColptr, 0, (n+1)*sizeof(cscColptr[0])); cscColptr[0]=base; for (i=0; i<nnz; i++){ cscColptr[1+csrColInd[i]-base]++; } for(i=0; i<n; i++){ cscColptr[i+1]+=cscColptr[i]; } /* expand row indecis and copy them and values into csc arrays according to permutation */ counters = (int *)malloc(n*sizeof(counters[0])); memset(counters, 0, n*sizeof(counters[0])); for (i=0; i<m; i++){ for (j=csrRowptr[i]; j<csrRowptr[i+1]; j++){ row = i+base; col = 
csrColInd[j-base]; index=cscColptr[col-base]-base+counters[col-base]; counters[col-base]++; cscRowind[index]=row; if(csrVals!=NULL || cscVals!=NULL){ val = csrVals[j-base]; cscVals[index] = val; } } } free(counters); } template <typename T> void run_widest_bench(const WidestPath_Usecase& param) { std::cout << "Initializing nvGRAPH library..." << std::endl; nvgraphHandle_t handle = NULL; if (handle == NULL) { NVGRAPH_SAFE_CALL(nvgraphCreate(&handle)); } nvgraphTopologyType_t topo = NVGRAPH_CSC_32; std::cout << "Reading input data..." << std::endl; FILE* fpin = fopen(param.graph_file.c_str(),"r"); if (fpin == NULL) { std::cout << "Cannot open input graph file: " << param.graph_file << std::endl; exit(1); } int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes if (read_header_amgx_csr_bin (fpin, n, nnz) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); } std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); if (read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); } fclose(fpin); std::cout << "Initializing data structures ..." << std::endl; nvgraphGraphDescr_t g1 = NULL; NVGRAPH_SAFE_CALL(nvgraphCreateGraphDescr(handle, &g1)); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; NVGRAPH_SAFE_CALL(nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo)); // set up graph data size_t numsets = 1; std::vector<T> calculated_res(n); //void* vertexptr[1] = {(void*)&calculated_res[0]}; cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; NVGRAPH_SAFE_CALL(nvgraphAllocateVertexData(handle, g1, numsets, type_v)); NVGRAPH_SAFE_CALL(nvgraphAllocateEdgeData(handle, g1, numsets, type_e )); NVGRAPH_SAFE_CALL(nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 )); int weight_index = 0; int source_vert = param.source_vert; int widest_path_index = 0; // run std::cout << "Running algorithm..." << std::endl; double start, stop; start = second(); start = second(); int repeat = std::max(param.repeats, 1); for (int i = 0; i < repeat; i++) NVGRAPH_SAFE_CALL(nvgraphWidestPath(handle, g1, weight_index, &source_vert, widest_path_index)); stop = second(); printf("Time of single WidestPath call is %10.8fsecs\n", (stop-start)/repeat); NVGRAPH_SAFE_CALL(nvgraphDestroyGraphDescr(handle, g1)); if (handle != NULL) { NVGRAPH_SAFE_CALL(nvgraphDestroy(handle)); handle = NULL; } } typedef struct SSSP_Usecase_t { std::string graph_file; int source_vert; int repeats; SSSP_Usecase_t(const std::string& a, int b, int c) : graph_file(a), source_vert(b), repeats(c){}; SSSP_Usecase_t& operator=(const SSSP_Usecase_t& rhs) { graph_file = rhs.graph_file; source_vert = rhs.source_vert; repeats = rhs.repeats; return *this; } } SSSP_Usecase; template <typename T> void run_sssp_bench(const SSSP_Usecase& param) { std::cout << "Initializing nvGRAPH library..." << std::endl; nvgraphHandle_t handle = NULL; if (handle == NULL) { NVGRAPH_SAFE_CALL(nvgraphCreate(&handle)); } nvgraphTopologyType_t topo = NVGRAPH_CSC_32; std::cout << "Reading input data..." 
<< std::endl; FILE* fpin = fopen(param.graph_file.c_str(),"r"); if (fpin == NULL) { std::cout << "Cannot read input graph file: " << param.graph_file << std::endl; exit(1); } int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes if (read_header_amgx_csr_bin (fpin, n, nnz) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); } std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); if (read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, read_val) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); } fclose(fpin); std::cout << "Initializing data structures ..." << std::endl; nvgraphGraphDescr_t g1 = NULL; NVGRAPH_SAFE_CALL(nvgraphCreateGraphDescr(handle, &g1)); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; NVGRAPH_SAFE_CALL(nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo)); // set up graph data size_t numsets = 1; cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; NVGRAPH_SAFE_CALL(nvgraphAllocateVertexData(handle, g1, numsets, type_v)); NVGRAPH_SAFE_CALL(nvgraphAllocateEdgeData(handle, g1, numsets, type_e )); NVGRAPH_SAFE_CALL(nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0)); int weight_index = 0; int source_vert = param.source_vert; int sssp_index = 0; // run std::cout << "Running algorithm ..." << std::endl; double start, stop; start = second(); start = second(); int repeat = std::max(param.repeats, 1); for (int i = 0; i < repeat; i++) NVGRAPH_SAFE_CALL(nvgraphSssp(handle, g1, weight_index, &source_vert, sssp_index)); stop = second(); printf("Time of single SSSP call is %10.8fsecs\n", (stop-start)/repeat); NVGRAPH_SAFE_CALL(nvgraphDestroyGraphDescr(handle, g1)); if (handle != NULL) { NVGRAPH_SAFE_CALL(nvgraphDestroy(handle)); handle = NULL; } } typedef struct Traversal_Usecase_t { std::string graph_file; int source_vert; int repeats; Traversal_Usecase_t(const std::string& a, int b, int c) : graph_file(a), source_vert(b), repeats(c){}; Traversal_Usecase_t& operator=(const Traversal_Usecase_t& rhs) { graph_file = rhs.graph_file; source_vert = rhs.source_vert; repeats = rhs.repeats; return *this; } } Traversal_Usecase; template <typename T> void run_traversal_bench(const Traversal_Usecase& param) { std::cout << "Initializing nvGRAPH library..." << std::endl; nvgraphHandle_t handle = NULL; if (handle == NULL) { NVGRAPH_SAFE_CALL(nvgraphCreate(&handle)); } nvgraphTopologyType_t topo = NVGRAPH_CSR_32; std::cout << "Reading input data..." 
<< std::endl; FILE* fpin = fopen(param.graph_file.c_str(),"r"); if (fpin == NULL) { std::cout << "Cannot read input graph file: " << param.graph_file << std::endl; exit(1); } //Read a transposed network in amgx binary format and the bookmark of dangling nodes /* if (read_header_amgx_csr_bin (fpin, n, nnz) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); } if (read_data_amgx_csr_bin (fpin, n, nnz, read_row_ptr, read_col_ind, csr_read_val) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); } fclose(fpin); */ int m, n, nnz; MM_typecode mc; if(mm_properties<int>(fpin, 1, &mc, &m, &n, &nnz) != 0) { std::cout << "could not read Matrix Market file properties"<< "\n"; exit(1); } std::vector<int> read_row_ptr(n+1), read_col_ind(nnz), coo_row_ind(nnz); std::vector<T> csr_read_val(nnz); if(mm_to_coo<int,T>(fpin, 1, nnz, &coo_row_ind[0], &read_col_ind[0], &csr_read_val[0], NULL)) { std::cout << "could not read matrix data"<< "\n"; exit(1); } if(coo_to_csr<int,T> (n, n, nnz, &coo_row_ind[0], &read_col_ind[0], &csr_read_val[0], NULL, &read_row_ptr[0], NULL, NULL, NULL)) { std::cout << "could not covert COO to CSR "<< "\n"; exit(1); } std::cout << "Initializing data structures ..." << std::endl; nvgraphGraphDescr_t g1 = NULL; NVGRAPH_SAFE_CALL(nvgraphCreateGraphDescr(handle, &g1)); // set up graph nvgraphCSRTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; NVGRAPH_SAFE_CALL(nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo)); // set up graph data size_t numsets = 1; cudaDataType_t type_v[1] = {nvgraph_Const<int>::Type}; NVGRAPH_SAFE_CALL(nvgraphAllocateVertexData(handle, g1, numsets, type_v)); int source_vert = param.source_vert; nvgraphTraversalParameter_t traversal_param; nvgraphTraversalParameterInit(&traversal_param); nvgraphTraversalSetDistancesIndex(&traversal_param, 0); // run std::cout << "Running algorithm ..." << std::endl; double start, stop; start = second(); start = second(); int repeat = std::max(param.repeats, 1); for (int i = 0; i < repeat; i++) NVGRAPH_SAFE_CALL(nvgraphTraversal(handle, g1, NVGRAPH_TRAVERSAL_BFS, &source_vert, traversal_param)); stop = second(); printf("Time of single Traversal call is %10.8fsecs\n", (stop-start)/repeat); NVGRAPH_SAFE_CALL(nvgraphDestroyGraphDescr(handle, g1)); if (handle != NULL) { NVGRAPH_SAFE_CALL(nvgraphDestroy(handle)); handle = NULL; } } typedef struct Pagerank_Usecase_t { std::string graph_file; float alpha; int repeats; int max_iters; double tolerance; Pagerank_Usecase_t(const std::string& a, float b, const int c, const int d, const double e) : graph_file(a), alpha(b), repeats(c), max_iters(d), tolerance(e) {}; Pagerank_Usecase_t& operator=(const Pagerank_Usecase_t& rhs) { graph_file = rhs.graph_file; alpha = rhs.alpha; repeats = rhs.repeats; max_iters = rhs.max_iters; tolerance = rhs.tolerance; return *this; } } Pagerank_Usecase; template <typename T> void run_pagerank_bench(const Pagerank_Usecase& param) { std::cout << "Initializing nvGRAPH library..." << std::endl; nvgraphHandle_t handle = NULL; if (handle == NULL) { NVGRAPH_SAFE_CALL(nvgraphCreate(&handle)); } nvgraphTopologyType_t topo = NVGRAPH_CSC_32; std::cout << "Reading input data..." 
<< std::endl; FILE* fpin = fopen(param.graph_file.c_str(),"r"); if (fpin == NULL) { std::cout << "Cannot open input graph file: " << param.graph_file << std::endl; exit(1); } int n, nnz; //Read a transposed network in amgx binary format and the bookmark of dangling nodes if (read_header_amgx_csr_bin (fpin, n, nnz) != 0) { std::cout << "Cannot read input graph file: " << param.graph_file << std::endl; exit(1); } std::vector<int> read_row_ptr(n+1), read_col_ind(nnz); std::vector<T> read_val(nnz); std::vector<T> dangling(n); if (read_data_amgx_csr_bin_rhs (fpin, n, nnz, read_row_ptr, read_col_ind, read_val, dangling) != 0) { std::cout << "Cannot read input graph file: " << param.graph_file << std::endl; exit(1); } fclose(fpin); std::cout << "Initializing data structures ..." << std::endl; nvgraphGraphDescr_t g1 = NULL; NVGRAPH_SAFE_CALL(nvgraphCreateGraphDescr(handle, &g1)); // set up graph nvgraphCSCTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; NVGRAPH_SAFE_CALL(nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo)); // set up graph data std::vector<T> calculated_res(n, (T)1.0/n); void* vertexptr[2] = {(void*)&dangling[0], (void*)&calculated_res[0]}; cudaDataType_t type_v[2] = {nvgraph_Const<T>::Type, nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&read_val[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; NVGRAPH_SAFE_CALL(nvgraphAllocateVertexData(handle, g1, 2, type_v)); NVGRAPH_SAFE_CALL(nvgraphSetVertexData(handle, g1, vertexptr[0], 0 )); NVGRAPH_SAFE_CALL(nvgraphSetVertexData(handle, g1, vertexptr[1], 1 )); NVGRAPH_SAFE_CALL(nvgraphAllocateEdgeData(handle, g1, 1, type_e )); NVGRAPH_SAFE_CALL(nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 )); int bookmark_index = 0; int weight_index = 0; T alpha = param.alpha; int pagerank_index = 1; int has_guess = 0; float tolerance = (T)param.tolerance; int max_iter = param.max_iters; std::cout << "Running algorithm ..." 
<< std::endl; // run double start, stop; start = second(); start = second(); int repeat = std::max(param.repeats, 1); for (int i = 0; i < repeat; i++) NVGRAPH_SAFE_CALL(nvgraphPagerank(handle, g1, weight_index, (void*)&alpha, bookmark_index, has_guess, pagerank_index, tolerance, max_iter)); stop = second(); printf("Time of single Pargerank call is %10.8fsecs\n", (stop-start)/repeat); NVGRAPH_SAFE_CALL(nvgraphDestroyGraphDescr(handle, g1)); if (handle != NULL) { NVGRAPH_SAFE_CALL(nvgraphDestroy(handle)); handle = NULL; } } typedef struct ModMax_Usecase_t { std::string graph_file; int clusters; int evals; int repeats; ModMax_Usecase_t(const std::string& a, int b, int c, int d) : graph_file(a), clusters(b), evals(c), repeats(d){}; ModMax_Usecase_t& operator=(const ModMax_Usecase_t& rhs) { graph_file = rhs.graph_file; clusters = rhs.clusters; evals = rhs.evals; repeats = rhs.repeats; return *this; } } ModMax_Usecase; template <typename T> void run_modularity_bench(const ModMax_Usecase& param) { // this function prints : // #clusters,time in ms,modularity nvgraphHandle_t handle = NULL; NVGRAPH_SAFE_CALL(nvgraphCreate(&handle)); int m, n, nnz; MM_typecode mc; FILE* fpin = fopen(param.graph_file.c_str(),"r"); mm_properties<int>(fpin, 1, &mc, &m, &n, &nnz) ; // Allocate memory on host std::vector<int> cooRowIndA(nnz); std::vector<int> csrColIndA(nnz); std::vector<int> csrRowPtrA(n+1); std::vector<T> csrValA(nnz); mm_to_coo<int,T>(fpin, 1, nnz, &cooRowIndA[0], &csrColIndA[0], &csrValA[0],NULL) ; coo_to_csr<int,T> (n, n, nnz, &cooRowIndA[0], &csrColIndA[0], &csrValA[0], NULL, &csrRowPtrA[0], NULL, NULL, NULL); fclose(fpin); //remove diagonal for (int i = 0; i < n; i++) for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++) if (csrColIndA[j]==i) csrValA[j] = 0.0; nvgraphGraphDescr_t g1 = NULL; struct SpectralClusteringParameter clustering_params; clustering_params.n_clusters = param.clusters; clustering_params.n_eig_vects = param.evals; clustering_params.algorithm = NVGRAPH_MODULARITY_MAXIMIZATION; clustering_params.evs_tolerance = 0.0f ; clustering_params.evs_max_iter = 0; clustering_params.kmean_tolerance = 0.0f; clustering_params.kmean_max_iter = 0; int weight_index = 0; //std::vector<T> clustering_h(n); //std::vector<T> eigVals_h(clustering_params.n_clusters); //std::vector<T> eigVecs_h(n*clustering_params.n_clusters); //could also be on device int *clustering_d; cudaMalloc((void**)&clustering_d , n*sizeof(int)); T* eigVals_d; cudaMalloc((void**)&eigVals_d, clustering_params.n_clusters*sizeof(T)); T* eigVecs_d; cudaMalloc((void**)&eigVecs_d, n*clustering_params.n_clusters*sizeof(T)); NVGRAPH_SAFE_CALL( nvgraphCreateGraphDescr(handle, &g1)); // set up graph nvgraphCSRTopology32I_st topology = {n, nnz, &csrRowPtrA[0], &csrColIndA[0]}; NVGRAPH_SAFE_CALL( nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSR_32)); // set up graph data size_t numsets = 1; void* edgeptr[1] = {(void*)&csrValA[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; NVGRAPH_SAFE_CALL( nvgraphAllocateEdgeData(handle, g1, numsets, type_e )); NVGRAPH_SAFE_CALL( nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 )); printf("%d,", clustering_params.n_clusters); double start, stop; start = second(); int repeat = std::max(param.repeats, 1); for (int i = 0; i < repeat; i++) // NVGRAPH_SAFE_CALL(nvgraphSpectralClustering(handle, g1, weight_index, &clustering_params, (int*)&clustering_h[0], (void*)&eigVals_h[0], (void*)&eigVecs_h[0])); NVGRAPH_SAFE_CALL(nvgraphSpectralClustering(handle, g1, weight_index, 
&clustering_params, clustering_d, eigVals_d, eigVecs_d)); //for (int i = 0; i < repeat; i++) // NVGRAPH_SAFE_CALL( nvgraphSpectralModularityMaximization(handle, g1, weight_index, clustering_params.n_clusters, clustering_params.n_eig_vects, 0.0f, 0, 0.0f, 0, clustering_d, (void*)&eigVals_h[0], (void*)&eigVecs_h[0])); //for (int i = 0; i < repeat; i++) // NVGRAPH_SAFE_CALL( nvgraphBalancedCutClustering(handle, g1, weight_index, clustering_params.n_clusters, clustering_params.n_eig_vects, 0, 0.0f, 0, 0.0f, 0, clustering_d, (void*)&eigVals_h[0], (void*)&eigVecs_h[0])); stop = second(); printf("%10.8f,", 1000.0*(stop-start)/repeat); //Print //std::vector<int> clust_h(n); //cudaMemcpy(&clust_h[0], clustering_d,n*sizeof(int),cudaMemcpyDeviceToHost); //printf("\n "); //for (int i = 0; i < n; ++i) // printf("%d ", clust_h [i]); //printf("\n "); //for (int i = 0; i < clustering_params.n_clusters; ++i) // std::cout << eigVals_h[i]<< ' ' ; //printf("\n "); //std::cout<< std::endl; //std::cout << std::endl; //for (int i = 0; i < clustering_params.n_clusters; ++i) //{ // for (int j = 0; j < 10; ++j) // std::cout << eigVecs_h[i*n+j] << ' '; // std::cout<< std::endl; //} // Analyse quality float score =0.0; nvgraphAnalyzeClustering(handle, g1, weight_index, clustering_params.n_clusters, clustering_d, NVGRAPH_MODULARITY, &score); printf("%f\n", score); // ratio cut // float ec =0.0, rc =0.0; // NVGRAPH_SAFE_CALL(nvgraphAnalyzeBalancedCut(handle, g1, weight_index, clustering_params.n_clusters, clustering_d, &ec, &rc)); // printf("%f,", rc); // // Synthetic random // for (int i=0; i<n; i++) // { // parts_h[i] = rand() % clustering_params.n_clusters; // //printf("%d ", parts_h[i]); // } // // Analyse quality // cudaMemcpy(clustering_d,&parts_h[0],n*sizeof(int),cudaMemcpyHostToDevice); // //NVGRAPH_SAFE_CALL( nvgraphAnalyzeModularityClustering(handle, g1, weight_index, clustering_params.n_clusters, clustering_d, &modularity1)); // //printf("%f\n", modularity1); // NVGRAPH_SAFE_CALL(nvgraphAnalyzeBalancedCut(handle, g1, weight_index, clustering_params.n_clusters, clustering_d, &ec, &rc)); // printf("%f\n", rc); //exit cudaFree(clustering_d); cudaFree(eigVals_d); cudaFree(eigVecs_d); NVGRAPH_SAFE_CALL(nvgraphDestroyGraphDescr(handle, g1)); } typedef struct BalancedCut_Usecase_t { std::string graph_file; int clusters; int evals; int repeats; BalancedCut_Usecase_t(const std::string& a, int b, int c, int d) : graph_file(a), clusters(b), evals(c), repeats(d){}; BalancedCut_Usecase_t& operator=(const BalancedCut_Usecase_t& rhs) { graph_file = rhs.graph_file; clusters = rhs.clusters; evals = rhs.evals; repeats = rhs.repeats; return *this; } } BalancedCut_Usecase; template <typename T> void run_balancedCut_bench(const BalancedCut_Usecase& param) { // this function prints : // #clusters,time in ms,rc nvgraphHandle_t handle = NULL; NVGRAPH_SAFE_CALL(nvgraphCreate(&handle)); int m, n, nnz; MM_typecode mc; FILE* fpin = fopen(param.graph_file.c_str(),"r"); mm_properties<int>(fpin, 1, &mc, &m, &n, &nnz) ; // Allocate memory on host std::vector<int> cooRowIndA(nnz); std::vector<int> csrColIndA(nnz); std::vector<int> csrRowPtrA(n+1); std::vector<T> csrValA(nnz); mm_to_coo<int,T>(fpin, 1, nnz, &cooRowIndA[0], &csrColIndA[0], &csrValA[0],NULL) ; coo_to_csr<int,T> (n, n, nnz, &cooRowIndA[0], &csrColIndA[0], &csrValA[0], NULL, &csrRowPtrA[0], NULL, NULL, NULL); fclose(fpin); //remove diagonal for (int i = 0; i < n; i++) for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++) if (csrColIndA[j]==i) csrValA[j] = 0.0; 
nvgraphGraphDescr_t g1 = NULL; struct SpectralClusteringParameter clustering_params; clustering_params.n_clusters = param.clusters; clustering_params.n_eig_vects = param.evals; clustering_params.algorithm = NVGRAPH_BALANCED_CUT_LANCZOS; clustering_params.evs_tolerance = 0.0f ; clustering_params.evs_max_iter = 0; clustering_params.kmean_tolerance = 0.0f; clustering_params.kmean_max_iter = 0; int weight_index = 0; //std::vector<T> clustering_h(n); //std::vector<T> eigVals_h(clustering_params.n_clusters); //std::vector<T> eigVecs_h(n*clustering_params.n_clusters); //could also be on device int *clustering_d; cudaMalloc((void**)&clustering_d , n*sizeof(int)); T* eigVals_d; cudaMalloc((void**)&eigVals_d, clustering_params.n_clusters*sizeof(T)); T* eigVecs_d; cudaMalloc((void**)&eigVecs_d, n*clustering_params.n_clusters*sizeof(T)); NVGRAPH_SAFE_CALL( nvgraphCreateGraphDescr(handle, &g1)); // set up graph nvgraphCSRTopology32I_st topology = {n, nnz, &csrRowPtrA[0], &csrColIndA[0]}; NVGRAPH_SAFE_CALL( nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSR_32)); // set up graph data size_t numsets = 1; void* edgeptr[1] = {(void*)&csrValA[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; NVGRAPH_SAFE_CALL( nvgraphAllocateEdgeData(handle, g1, numsets, type_e )); NVGRAPH_SAFE_CALL( nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 )); printf("%d,", clustering_params.n_clusters); double start, stop; start = second(); int repeat = std::max(param.repeats, 1); for (int i = 0; i < repeat; i++) // NVGRAPH_SAFE_CALL(nvgraphSpectralClustering(handle, g1, weight_index, &clustering_params, (int*)&clustering_h[0], (void*)&eigVals_h[0], (void*)&eigVecs_h[0])); NVGRAPH_SAFE_CALL(nvgraphSpectralClustering(handle, g1, weight_index, &clustering_params, clustering_d, eigVals_d, eigVecs_d)); stop = second(); printf("%10.8f,", 1000.0*(stop-start)/repeat); // Analyse quality float score =0.0; nvgraphAnalyzeClustering(handle, g1, weight_index, clustering_params.n_clusters, clustering_d, NVGRAPH_RATIO_CUT, &score); printf("%f\n", score); //exit cudaFree(clustering_d); cudaFree(eigVals_d); cudaFree(eigVecs_d); NVGRAPH_SAFE_CALL(nvgraphDestroyGraphDescr(handle, g1)); } typedef struct TriCount_Usecase_t { std::string graph_file; int repeats; TriCount_Usecase_t(const std::string& a, const int b) : graph_file(a), repeats(b){}; TriCount_Usecase_t& operator=(const TriCount_Usecase_t& rhs) { graph_file = rhs.graph_file; repeats = rhs.repeats; return *this; } } TriCount_Usecase; void run_tricount_bench(const TriCount_Usecase& param) { std::cout << "Initializing nvGRAPH library..." << std::endl; nvgraphHandle_t handle = NULL; if (handle == NULL) { NVGRAPH_SAFE_CALL(nvgraphCreate(&handle)); } nvgraphTopologyType_t topo = NVGRAPH_CSR_32; std::cout << "Reading input data..." << std::endl; FILE* fpin = fopen(param.graph_file.c_str(),"rb"); if (fpin == NULL) { std::cout << "Cannot open input graph file: " << param.graph_file << std::endl; exit(1); } int n, nnz; std::vector<int> read_row_ptr, read_col_ind; //Read CSR of lower triangular of undirected graph if (read_csr_bin<int> (fpin, n, nnz, read_row_ptr, read_col_ind) != 0) { std::cout << "Error reading input file: " << param.graph_file << std::endl; exit(1); } fclose(fpin); std::cout << "Initializing data structures ..." 
<< std::endl; nvgraphGraphDescr_t g1 = NULL; NVGRAPH_SAFE_CALL(nvgraphCreateGraphDescr(handle, &g1)); // set up graph nvgraphCSRTopology32I_st topology = {n, nnz, &read_row_ptr[0], &read_col_ind[0]}; NVGRAPH_SAFE_CALL(nvgraphSetGraphStructure(handle, g1, (void*)&topology, topo)); // set up graph data uint64_t res = 0; // run std::cout << "Running algorithm..." << std::endl; double start, stop; start = second(); start = second(); int repeat = std::max(param.repeats, 1); for (int i = 0; i < repeat; i++) NVGRAPH_SAFE_CALL(nvgraphTriangleCount(handle, g1, &res)); stop = second(); printf("Number of triangles counted: %lli\n", (long long int)res); printf("Time of single TriangleCount call is %10.8fsecs\n", (stop-start)/repeat); NVGRAPH_SAFE_CALL(nvgraphDestroyGraphDescr(handle, g1)); if (handle != NULL) { NVGRAPH_SAFE_CALL(nvgraphDestroy(handle)); handle = NULL; } } int findParamIndex(const char** argv, int argc, const char* parm) { int count = 0; int index = -1; for (int i = 0; i < argc; i++) { if (strncmp(argv[i], parm, 100)==0) { index = i; count++; } } if (count == 0 || count == 1) { return index; } else { printf("Error, parameter %s has been specified more than once, exiting\n",parm); exit(1); } return -1; } int main(int argc, const char **argv) { int pidx = 0; int repeats = 100; if (argc < 2 || findParamIndex(argv, argc, "--help") != -1) { printf("Usage: \n"); printf(" nvgraph_benchmark [--double|--float] [--repeats N] --spmv graph_file \n"); printf(" nvgraph_benchmark [--double|--float] [--repeats N] --widest graph_file start_vertex \n"); printf(" nvgraph_benchmark [--double|--float] [--repeats N] --sssp graph_file start_vertex \n"); printf(" nvgraph_benchmark [--double|--float] [--repeats N] --pagerank graph_file alpha max_iters tolerance \n"); printf(" nvgraph_benchmark [--double|--float] [--repeats N] --modularity graph_file nb_clusters nb_eigvals \n"); printf(" nvgraph_benchmark [--double|--float] [--repeats N] --traversal graph_file start_vertex \n"); printf(" nvgraph_benchmark [--double|--float] [--repeats N] --balancedCut graph_file nb_clusters nb_eigvals \n"); printf(" nvgraph_benchmark [--repeats N] --tricount graph_file \n"); exit(0); } if ( (pidx = findParamIndex(argv, argc, "--repeats")) != -1) { repeats = atoi(argv[pidx+1]); } if (findParamIndex(argv, argc, "--double") != -1 || findParamIndex(argv, argc, "--float") == -1) { if ((pidx = findParamIndex(argv, argc, "--widest")) != -1) { run_widest_bench<double>(WidestPath_Usecase(argv[pidx+1], atoi(argv[pidx+2]), repeats)); } else if ((pidx = findParamIndex(argv, argc, "--spmv")) != -1) { run_srspmv_bench<double>(SrSPMV_Usecase(argv[pidx+1], repeats)); } else if ((pidx = findParamIndex(argv, argc, "--sssp")) != -1) { run_sssp_bench<double>(SSSP_Usecase(argv[pidx+1], atoi(argv[pidx+2]), repeats)); } else if ((pidx = findParamIndex(argv, argc, "--pagerank")) != -1) { run_pagerank_bench<double>(Pagerank_Usecase(argv[pidx+1], atof(argv[pidx+2]), repeats, atoi(argv[pidx+3]), atof(argv[pidx+4]))); } else if ((pidx = findParamIndex(argv, argc, "--modularity")) != -1) { run_modularity_bench<double>(ModMax_Usecase(argv[pidx+1], atoi(argv[pidx+2]), atoi(argv[pidx+3]), repeats)); } else if ((pidx = findParamIndex(argv, argc, "--traversal")) != -1) { run_traversal_bench<double>(Traversal_Usecase(argv[pidx+1], atoi(argv[pidx+2]), repeats)); } else if ((pidx = findParamIndex(argv, argc, "--balancedCut")) != -1) { run_balancedCut_bench<double>(BalancedCut_Usecase(argv[pidx+1], atoi(argv[pidx+2]), atoi(argv[pidx+3]), repeats)); } else if ((pidx 
= findParamIndex(argv, argc, "--tricount")) != -1) { run_tricount_bench(TriCount_Usecase(argv[pidx+1], repeats)); } else { printf("Specify one of the algorithms: '--widest', '--sssp', '--pagerank', '--modularity', '--balancedCut', '--traversal', or 'tricount'\n"); } } else { if ((pidx = findParamIndex(argv, argc, "--widest")) != -1) { run_widest_bench<float>(WidestPath_Usecase(argv[pidx+1], atoi(argv[pidx+2]), repeats)); } else if ((pidx = findParamIndex(argv, argc, "--spmv")) != -1) { run_srspmv_bench<float>(SrSPMV_Usecase(argv[pidx+1], repeats)); } else if ((pidx = findParamIndex(argv, argc, "--sssp")) != -1) { run_sssp_bench<float>(SSSP_Usecase(argv[pidx+1], atoi(argv[pidx+2]), repeats)); } else if ((pidx = findParamIndex(argv, argc, "--pagerank")) != -1) { run_pagerank_bench<float>(Pagerank_Usecase(argv[pidx+1], atof(argv[pidx+2]), repeats, atoi(argv[pidx+3]), atof(argv[pidx+4]))); } else if ((pidx = findParamIndex(argv, argc, "--modularity")) != -1) { run_modularity_bench<float>(ModMax_Usecase(argv[pidx+1], atoi(argv[pidx+2]), atoi(argv[pidx+3]), repeats)); } else if ((pidx = findParamIndex(argv, argc, "--traversal")) != -1) { run_traversal_bench<float>(Traversal_Usecase(argv[pidx+1], atoi(argv[pidx+2]), repeats)); } else if ((pidx = findParamIndex(argv, argc, "--balancedCut")) != -1) { run_balancedCut_bench<float>(BalancedCut_Usecase(argv[pidx+1], atoi(argv[pidx+2]), atoi(argv[pidx+3]), repeats)); } else { printf("Specify one of the algorithms: '--widest', '--sssp' , '--pagerank', '--modularity', '--balancedCut' or '--traversal'\n"); } } return 0; }
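// --- Illustrative sketch (not part of the original benchmark file) ---
// The run_*_bench drivers above all reduce to the same idea: time repeated nvGRAPH
// calls and report the mean per-call cost, with the SpMV benchmark additionally
// warming up and synchronizing around each call. The helper below gathers those
// pieces into one self-contained form as a sketch; it uses std::chrono in place of
// the second() helper from nvgraph_test_common.h, and the name time_nvgraph_call_ms
// is an illustrative assumption, not part of the benchmark.
#include <chrono>
#include <cuda_runtime.h>

template <typename F>
double time_nvgraph_call_ms(F run_algorithm, int repeat) {
    run_algorithm();             // warm-up call, excluded from the timing
    cudaDeviceSynchronize();     // let prior GPU work drain before starting the clock
    auto start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < repeat; ++i)
        run_algorithm();
    cudaDeviceSynchronize();     // match the synchronization the benchmarks do before reading the clock
    auto stop = std::chrono::high_resolution_clock::now();
    return std::chrono::duration<double, std::milli>(stop - start).count() / repeat;
}
// Usage, assuming handle/g1/source_vert have been prepared as in run_sssp_bench:
//   double ms = time_nvgraph_call_ms([&]{ nvgraphSssp(handle, g1, 0, &source_vert, 0); }, repeats);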
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_conversion.cpp
#include <vector> // #include "boost/tuple/tuple.hpp" #include <algorithm> #include <stdlib.h> #include <time.h> #include <limits> #include "gtest/gtest.h" #include "nvgraph.h" #include <valued_csr_graph.hxx> #include <multi_valued_csr_graph.hxx> #include <nvgraphP.h> // private header, contains structures, and potentially other things, used in the public C API that should never be exposed. #include "convert_preset_testcases.h" #define DEBUG_MSG std::cout << "-----------> " << __FILE__ << " " << __LINE__ << std::endl; #define DEBUG_VAR(var) std::cout << "-----------> " << __FILE__ << " " << __LINE__ << ": " << #var"=" << var << std::endl; typedef enum { CSR_32 = 0, CSC_32 = 1, COO_DEFAULT_32 = 2, COO_UNSORTED_32 = 3, COO_SOURCE_32 = 4, COO_DESTINATION_32 = 5 } testTopologyType_t; // ref functions taken from cuSparse template <typename T_ELEM> void ref_csr2csc (int m, int n, int nnz, const T_ELEM *csrVals, const int *csrRowptr, const int *csrColInd, T_ELEM *cscVals, int *cscRowind, int *cscColptr, int base=0){ int i,j, row, col, index; int * counters; T_ELEM val; /* early return */ if ((m <= 0) || (n <= 0) || (nnz <= 0)){ return; } /* build compressed column pointers */ memset(cscColptr, 0, (n+1)*sizeof(cscColptr[0])); cscColptr[0]=base; for (i=0; i<nnz; i++){ cscColptr[1+csrColInd[i]-base]++; } for(i=0; i<n; i++){ cscColptr[i+1]+=cscColptr[i]; } /* expand row indecis and copy them and values into csc arrays according to permutation */ counters = (int *)malloc(n*sizeof(counters[0])); memset(counters, 0, n*sizeof(counters[0])); for (i=0; i<m; i++){ for (j=csrRowptr[i]; j<csrRowptr[i+1]; j++){ row = i+base; col = csrColInd[j-base]; index=cscColptr[col-base]-base+counters[col-base]; counters[col-base]++; cscRowind[index]=row; if(csrVals!=NULL || cscVals!=NULL){ val = csrVals[j-base]; cscVals[index] = val; } } } free(counters); } // Not from cusparse (nvbug: 1762491) static void ref_coo2csr(const int *cooRowindx, int nnz, int m, int *csrRowPtr, int base=0){ memset(csrRowPtr, 0, sizeof(int)*(m+1) ); // Fill csrRowPtr with zeros for (int i=0; i<nnz; i++){ // fill csrRowPtr with number of nnz per row int idx = cooRowindx[i]-base; csrRowPtr[idx]++; } int t = base; // total sum for(int i=0; i<m; i++){ int temp = csrRowPtr[i]; csrRowPtr[i] = t; t += temp; } csrRowPtr[m] = nnz + base; // last element is trivial } void ref_csr2coo(const int *csrRowindx, int nnz, int m, int *cooRowindx){ int base; cooRowindx[0] = csrRowindx[0]; base = csrRowindx[0]; for( int j = 0; j < m; j++) { int colStart = csrRowindx[j] - base; int colEnd = csrRowindx[j+1] - base; int rowNnz = colEnd - colStart; for ( int i = 0; i < rowNnz; i++) { cooRowindx[colStart+i] = j + base; } } } //////////////////////////////////////////////////////////////////////////////////////////////// // sort by row/col functions (not from cusparse) //////////////////////////////////////////////////////////////////////////////////////////////// struct comparator{ const std::vector<int>& values; comparator(const std::vector<int>& val_vec): values(val_vec) {} bool operator()(int n, int m){ return values[n] < values[m]; } }; template<typename T> void getSortPermutation(const std::vector<T>& minorOrder, const std::vector<T>& majorOrder, std::vector<int>& p){ int n = majorOrder.size(); p.clear(); p.reserve(n); for(int i=0; i < n; ++i) p.push_back(i); std::stable_sort(p.begin(), p.end(), comparator(minorOrder)); // first "minor" sort std::stable_sort(p.begin(), p.end(), comparator(majorOrder)); // second "major" sort } template<typename T> void 
ref_cooSortBySource(int n, const T *srcData, const int *srcRow, const int *srcCol, T *dstData, int *dstRow, int *dstCol){ std::vector<int> srcR(srcRow, srcRow + n); std::vector<int> srcC(srcCol, srcCol + n); std::vector<int> p(n, 0); getSortPermutation(srcC, srcR, p); // sort p according to srcC for (int i=0; i<n ; i++) { dstRow[i]=srcRow[p[i]]; dstCol[i]=srcCol[p[i]]; dstData[i]=srcData[p[i]]; } } template<typename T> void ref_cooSortByDestination(int nnz, const T *srcData, const int *srcRow, const int *srcCol, T *dstData, int *dstRow, int *dstCol){ ref_cooSortBySource(nnz, srcData, srcCol, srcRow, dstData, dstCol, dstRow); } //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// // Random generators //////////////////////////////////////////////////////////////////////////////////////////////// void randomArray(int n, void* arr, cudaDataType_t *dataType){ if(*dataType==CUDA_R_32F){ float* a = (float*)arr; for(int i=0; i<n; ++i) a[i] = (float)rand()/(rand()+1); // don't divide by 0. } else if(*dataType==CUDA_R_64F) { double* a = (double*)arr; for(int i=0; i<n; ++i) a[i] = (double)rand()/(rand()+1); // don't divide by 0. } else { FAIL(); } } void randomCOOGenerator( int *rowInd, int *colInd, int *nnz, int n, int maxPerRow, int maxjump, int max_nnz) { int nnzCounter = 0; for(int row = 0 ; row<n && nnzCounter<max_nnz; row++){ int elementsPerRow = 0; int col = 0; while( elementsPerRow<maxPerRow && nnzCounter<max_nnz ){ int jump = (rand() % maxjump) +1; col += jump; if (col >= n) break; rowInd[nnzCounter] = row; colInd[nnzCounter] = col; nnzCounter++; elementsPerRow++; } } *nnz = nnzCounter; } void randomCsrGenerator( int *rowPtr, int *colInd, int *nnz, int n, int maxPerRow, int maxjump, int max_nnz) { int *rowInd = (int*)malloc (sizeof(int)*max_nnz); randomCOOGenerator(rowInd, colInd, nnz, n, maxPerRow, maxjump, max_nnz); ref_coo2csr(rowInd, *nnz, n, rowPtr); free(rowInd); } typedef enum{ HOST = 0, DEVICE = 1 } addressSpace_t; class NVGraphAPIConvertTest : public ::testing::Test { public: nvgraphStatus_t status; nvgraphHandle_t handle; NVGraphAPIConvertTest() : handle(NULL) {} // static void SetupTestCase() {} // static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } srand (time(NULL)); } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } // CPU conversion (reference) template <typename T> static void refConvert(nvgraphTopologyType_t srcTType, void *srcTopology, const T *srcEdgeData, nvgraphTopologyType_t dstTType, void *dstTopology, T *dstEdgeData){ // Trust me, this a 100 times better than nested ifs. 
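// Illustrative (hypothetical) 3-vertex example of the two building blocks the dispatch below
// relies on, assuming base 0:
//   ref_csr2coo expands CSR row offsets [0, 2, 3, 5] into COO row indices [0, 0, 1, 2, 2]
//   (row i is repeated row_offsets[i+1] - row_offsets[i] times);
//   ref_coo2csr inverts that step by counting entries per row and prefix-summing the counts
//   back into [0, 2, 3, 5].
//   Column indices and edge values are copied through unchanged unless the target tag first
//   requires a re-sort by source (ref_cooSortBySource) or destination (ref_cooSortByDestination).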
if(srcTType==NVGRAPH_CSR_32 && dstTType==NVGRAPH_CSR_32){ // CSR2CSR nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t >(srcTopology); nvgraphCSRTopology32I_t dstT = static_cast<nvgraphCSRTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; memcpy(dstEdgeData, srcEdgeData, sizeof(T)*srcT->nedges); memcpy(dstT->source_offsets, srcT->source_offsets, sizeof(int)*(srcT->nvertices+1) ); memcpy(dstT->destination_indices, srcT->destination_indices, sizeof(int)*(srcT->nedges) ); } else if(srcTType==NVGRAPH_CSR_32 && dstTType==NVGRAPH_CSC_32) { // CSR2CSC nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t >(srcTopology); nvgraphCSCTopology32I_t dstT = static_cast<nvgraphCSCTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; ref_csr2csc<T> (srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_offsets, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_offsets); } else if(srcTType==NVGRAPH_CSR_32 && dstTType==NVGRAPH_COO_32) { // CSR2COO nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t >(srcTopology); nvgraphCOOTopology32I_t dstT = static_cast<nvgraphCOOTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if(dstT->tag==NVGRAPH_DEFAULT || dstT->tag==NVGRAPH_UNSORTED || dstT->tag==NVGRAPH_SORTED_BY_SOURCE){ ref_csr2coo(srcT->source_offsets, srcT->nedges, srcT->nvertices, dstT->source_indices); memcpy(dstT->destination_indices, srcT->destination_indices, sizeof(int)*(srcT->nedges) ); memcpy(dstEdgeData, srcEdgeData, sizeof(T)*(srcT->nedges) ); } else if (dstT->tag==NVGRAPH_SORTED_BY_DESTINATION) { int* tmp=(int*)malloc(sizeof(int)*(dstT->nedges) ); // Step 1: Convert to COO Source ref_csr2coo(srcT->source_offsets, srcT->nedges, srcT->nvertices, tmp); // Step 2: Convert to COO Dest ref_cooSortByDestination(srcT->nedges, srcEdgeData, tmp, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_indices); free(tmp); } else { FAIL(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////// } else if(srcTType==NVGRAPH_CSC_32 && dstTType==NVGRAPH_CSR_32) { // CSC2CSR nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t >(srcTopology); nvgraphCSRTopology32I_t dstT = static_cast<nvgraphCSRTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; ref_csr2csc<T> (srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->destination_offsets, srcT->source_indices, dstEdgeData, dstT->destination_indices, dstT->source_offsets); } else if(srcTType==NVGRAPH_CSC_32 && dstTType==NVGRAPH_CSC_32) { // CSC2CSC nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t >(srcTopology); nvgraphCSCTopology32I_t dstT = static_cast<nvgraphCSCTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; memcpy(dstT->destination_offsets, srcT->destination_offsets, sizeof(int)*(srcT->nvertices+1) ); memcpy(dstT->source_indices, srcT->source_indices, sizeof(int)*(srcT->nedges) ); memcpy(dstEdgeData, srcEdgeData, sizeof(T)*(srcT->nedges) ); } else if(srcTType==NVGRAPH_CSC_32 && dstTType==NVGRAPH_COO_32) { // CSC2COO nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t >(srcTopology); nvgraphCOOTopology32I_t dstT = static_cast<nvgraphCOOTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; 
if(dstT->tag==NVGRAPH_SORTED_BY_SOURCE){ int* tmp = (int*)malloc(sizeof(int)*(dstT->nedges)); // Step 1: Convert to COO Dest ref_csr2coo(srcT->destination_offsets, srcT->nedges, srcT->nvertices, tmp); // Step 2: Convert to COO Source ref_cooSortBySource(srcT->nedges, srcEdgeData, srcT->source_indices, tmp, dstEdgeData, dstT->source_indices, dstT->destination_indices); free(tmp); } else if (dstT->tag==NVGRAPH_DEFAULT || dstT->tag==NVGRAPH_UNSORTED || dstT->tag==NVGRAPH_SORTED_BY_DESTINATION) { ref_csr2coo(srcT->destination_offsets, srcT->nedges, srcT->nvertices, dstT->destination_indices); memcpy(dstT->source_indices, srcT->source_indices, sizeof(int)*(srcT->nedges) ); memcpy(dstEdgeData, srcEdgeData, sizeof(T)*(srcT->nedges) ); } else { FAIL(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////// } else if(srcTType==NVGRAPH_COO_32 && dstTType==NVGRAPH_CSR_32) { // COO2CSR nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t >(srcTopology); nvgraphCSRTopology32I_t dstT = static_cast<nvgraphCSRTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if(srcT->tag==NVGRAPH_SORTED_BY_SOURCE){ ref_coo2csr(srcT->source_indices, srcT->nedges, srcT->nvertices, dstT->source_offsets); memcpy(dstT->destination_indices, srcT->destination_indices, sizeof(int)*(srcT->nedges) ); memcpy(dstEdgeData, srcEdgeData, sizeof(T)*(srcT->nedges) ); } else if(srcT->tag==NVGRAPH_SORTED_BY_DESTINATION || srcT->tag==NVGRAPH_DEFAULT || srcT->tag==NVGRAPH_UNSORTED){ int *tmp = (int*)malloc(sizeof(int)*(srcT->nedges) ); // Step 1: convert to COO Dest ref_cooSortBySource(srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, tmp, dstT->destination_indices); // Step 1: convert to CSC ref_coo2csr(tmp, srcT->nedges, srcT->nvertices, dstT->source_offsets); free(tmp); } else { FAIL(); } } else if(srcTType==NVGRAPH_COO_32 && dstTType==NVGRAPH_CSC_32) { // COO2CSC nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t >(srcTopology); nvgraphCSCTopology32I_t dstT = static_cast<nvgraphCSCTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if(srcT->tag==NVGRAPH_SORTED_BY_SOURCE || srcT->tag==NVGRAPH_DEFAULT || srcT->tag==NVGRAPH_UNSORTED){ int *tmp = (int*)malloc(sizeof(int)*srcT->nedges); // Step 1: convert to COO dest ref_cooSortByDestination(srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_indices, tmp); // Step 1: convert to CSC ref_coo2csr(tmp, srcT->nedges, srcT->nvertices, dstT->destination_offsets); free(tmp); } else if(srcT->tag==NVGRAPH_SORTED_BY_DESTINATION) { ref_coo2csr(srcT->destination_indices, srcT->nedges, srcT->nvertices, dstT->destination_offsets); memcpy(dstT->source_indices, srcT->source_indices, sizeof(int)*(srcT->nedges) ); memcpy(dstEdgeData, srcEdgeData, sizeof(T)*(srcT->nedges) ); } else { FAIL(); } } else if(srcTType==NVGRAPH_COO_32 && dstTType==NVGRAPH_COO_32) { // COO2COO nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t >(srcTopology); nvgraphCOOTopology32I_t dstT = static_cast<nvgraphCOOTopology32I_t >(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if(srcT->tag==dstT->tag || dstT->tag==NVGRAPH_DEFAULT || dstT->tag==NVGRAPH_UNSORTED) { memcpy(dstT->source_indices, srcT->source_indices, sizeof(int)*(srcT->nedges) ); memcpy(dstT->destination_indices, srcT->destination_indices, sizeof(int)*(srcT->nedges) ); 
memcpy(dstEdgeData, srcEdgeData, sizeof(T)*srcT->nedges); } else if(dstT->tag==NVGRAPH_SORTED_BY_SOURCE) { ref_cooSortBySource(srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_indices); } else if(dstT->tag==NVGRAPH_SORTED_BY_DESTINATION) { ref_cooSortByDestination(srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_indices); } else { FAIL(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////// } else { FAIL(); } } /////////////////////////////////////////////////////////////////////////////////////////////////////// // Topology Helper functions /////////////////////////////////////////////////////////////////////////////////////////////////////// // The function must be void static void topoGetN(testTopologyType_t TType, void *topo, int* n){ int result=0; if(TType==CSR_32){ nvgraphCSRTopology32I_t t = static_cast<nvgraphCSRTopology32I_t >(topo); result = t->nvertices; } else if(TType==CSC_32){ nvgraphCSCTopology32I_t t = static_cast<nvgraphCSCTopology32I_t >(topo); result = t->nvertices; } else if(TType==COO_SOURCE_32 || TType==COO_DESTINATION_32 || TType==COO_UNSORTED_32 || TType==COO_DEFAULT_32){ nvgraphCOOTopology32I_t t = static_cast<nvgraphCOOTopology32I_t >(topo); result = t->nvertices; } else{ FAIL(); } *n=result; } // The function must be void static void topoGetNNZ(testTopologyType_t TType, void *topo, int*n){ int result=0; if(TType==CSR_32){ nvgraphCSRTopology32I_t t = static_cast<nvgraphCSRTopology32I_t >(topo); result = t->nedges; } else if(TType==CSC_32){ nvgraphCSCTopology32I_t t = static_cast<nvgraphCSCTopology32I_t >(topo); result = t->nedges; } else if(TType==COO_SOURCE_32 || TType==COO_DESTINATION_32 || TType==COO_UNSORTED_32 || TType==COO_DEFAULT_32){ nvgraphCOOTopology32I_t t = static_cast<nvgraphCOOTopology32I_t >(topo); result = t->nedges; } else{ FAIL(); } *n=result; } /////////////////////////////////////////////////////////////////////////////////////////////////////// // Allocation/de-allocation functions /////////////////////////////////////////////////////////////////////////////////////////////////////// static void allocateTopo(void **topoPtr, testTopologyType_t TType, int n, int nnz, addressSpace_t aSpace){ if(TType==CSR_32){ *topoPtr=(nvgraphCSRTopology32I_t)malloc(sizeof(nvgraphCSRTopology32I_st)); nvgraphCSRTopology32I_t p = static_cast<nvgraphCSRTopology32I_t >(*topoPtr); if(aSpace==HOST){ p->source_offsets = (int*)malloc(sizeof(int)*(n+1)); p->destination_indices = (int*)malloc(sizeof(int)*(nnz)); } else if(aSpace==DEVICE){ cudaMalloc((void**)&(p->source_offsets), sizeof(int)*(n+1)); cudaMalloc((void**)&(p->destination_indices), sizeof(int)*(nnz)); } else { FAIL(); } p->nvertices = n; p->nedges = nnz; } else if(TType==CSC_32){ *topoPtr=(nvgraphCSCTopology32I_t)malloc(sizeof(nvgraphCSCTopology32I_st)); nvgraphCSCTopology32I_t p = static_cast<nvgraphCSCTopology32I_t >(*topoPtr); if(aSpace==HOST){ p->destination_offsets = (int*)malloc(sizeof(int)*(n+1)); p->source_indices = (int*)malloc(sizeof(int)*(nnz)); } else if(aSpace==DEVICE){ cudaMalloc((void**)&(p->destination_offsets), sizeof(int)*(n+1)); cudaMalloc((void**)&(p->source_indices), sizeof(int)*(nnz)); } else { FAIL(); } p->nvertices = n; p->nedges = nnz; } else if(TType==COO_SOURCE_32 || TType==COO_DESTINATION_32 || TType==COO_UNSORTED_32 || TType==COO_DEFAULT_32){ 
*topoPtr=(nvgraphCOOTopology32I_t)malloc(sizeof(nvgraphCOOTopology32I_st)); nvgraphCOOTopology32I_t p = static_cast<nvgraphCOOTopology32I_t >(*topoPtr); if(aSpace==HOST){ p->source_indices = (int*)malloc(sizeof(int)*(nnz)); p->destination_indices = (int*)malloc(sizeof(int)*(nnz)); } else if(aSpace==DEVICE){ cudaMalloc((void**)&(p->source_indices), sizeof(int)*(nnz)); cudaMalloc((void**)&(p->destination_indices), sizeof(int)*(nnz)); } else { FAIL(); } p->nvertices = n; p->nedges = nnz; if(TType==COO_SOURCE_32) p->tag=NVGRAPH_SORTED_BY_SOURCE; else if(TType==COO_DESTINATION_32) p->tag=NVGRAPH_SORTED_BY_DESTINATION; else if(TType==COO_UNSORTED_32) p->tag=NVGRAPH_UNSORTED; else if(TType==COO_DEFAULT_32) p->tag=NVGRAPH_DEFAULT; else FAIL(); } else { FAIL(); } } static void deAllocateTopo(void* topo, testTopologyType_t TType, addressSpace_t aSpace){ if(topo==NULL) return; void *rowPtr, *colPtr; if(TType==CSR_32){ nvgraphCSRTopology32I_t p = static_cast<nvgraphCSRTopology32I_t >(topo); rowPtr = p->source_offsets; colPtr = p->destination_indices; free(p); } else if(TType==CSC_32){ nvgraphCSCTopology32I_t p = static_cast<nvgraphCSCTopology32I_t >(topo); rowPtr = p->source_indices; colPtr = p->destination_offsets; free(p); } else if(TType==COO_SOURCE_32 || TType==COO_DESTINATION_32 || TType==COO_UNSORTED_32 || TType==COO_DEFAULT_32){ nvgraphCOOTopology32I_t p = static_cast<nvgraphCOOTopology32I_t >(topo); rowPtr = p->source_indices; colPtr = p->destination_indices; free(p); } else { FAIL(); } if(aSpace==HOST){ free(rowPtr); free(colPtr); } else if (aSpace==DEVICE){ cudaFree(rowPtr); cudaFree(colPtr); } else { FAIL(); } } static void cpyTopo(void *dst, void *src, testTopologyType_t TType, enum cudaMemcpyKind kind=cudaMemcpyDefault){ int *srcRow=NULL, *srcCol=NULL; int *dstRow=NULL, *dstCol=NULL; int rowSize=0, colSize=0; if(TType==CSR_32) { nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t >(src); nvgraphCSRTopology32I_t dstT = static_cast<nvgraphCSRTopology32I_t >(dst); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; rowSize = srcT->nvertices+1; colSize = srcT->nedges; srcRow = srcT->source_offsets; dstRow = dstT->source_offsets; srcCol = srcT->destination_indices; dstCol = dstT->destination_indices; } else if(TType==CSC_32) { nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t >(src); nvgraphCSCTopology32I_t dstT = static_cast<nvgraphCSCTopology32I_t >(dst); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; rowSize = srcT->nedges; colSize = srcT->nvertices+1; srcRow = srcT->source_indices; dstRow = dstT->source_indices; srcCol = srcT->destination_offsets; dstCol = dstT->destination_offsets; } else if(TType==COO_SOURCE_32 || TType==COO_DESTINATION_32 || TType==COO_UNSORTED_32 || TType==COO_DEFAULT_32) { nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t >(src); nvgraphCOOTopology32I_t dstT = static_cast<nvgraphCOOTopology32I_t >(dst); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; dstT->tag = srcT->tag; rowSize = srcT->nedges; colSize = srcT->nedges; srcRow = srcT->source_indices; dstRow = dstT->source_indices; srcCol = srcT->destination_indices; dstCol = dstT->destination_indices; } else { FAIL(); } ASSERT_EQ(cudaSuccess, cudaMemcpy(dstRow, srcRow, sizeof(int)*rowSize, kind)); ASSERT_EQ(cudaSuccess, cudaMemcpy(dstCol, srcCol, sizeof(int)*colSize, kind)); } /////////////////////////////////////////////////////////////////////////////////////////////////////// // Comparison functions 
/////////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> static void cmpArray(T* ref, addressSpace_t refSapce, T* dst, addressSpace_t dstSpace, int n){ T *_refData=NULL, *_dstData=NULL; if(refSapce==DEVICE){ _refData = (T*)malloc(sizeof(T)*n); cudaMemcpy(_refData, ref, sizeof(T)*n, cudaMemcpyDefault); } else { _refData = ref; } if(dstSpace==DEVICE){ _dstData = (T*)malloc(sizeof(T)*n); cudaMemcpy(_dstData, dst, sizeof(T)*n, cudaMemcpyDefault); } else { _dstData = dst; } std::vector<T> refData; std::vector<T> dstData; refData.assign(_refData, _refData + n); dstData.assign(_dstData, _dstData + n); for(int i=0; i<refData.size(); ++i) ASSERT_EQ(refData[i], dstData[i]); // ASSERT_EQ(refData, dstData); if(refSapce==DEVICE) free(_refData); if(dstSpace==DEVICE) free(_dstData); } static void cmpTopo(nvgraphTopologyType_t TType, void *refTopology, addressSpace_t refSpace, void *dstTopology, addressSpace_t dstSpace){ int *_refRows=NULL, *_refCols=NULL; int *_dstRows=NULL, *_dstCols=NULL; int *refRowsHost=NULL, *refColsHost=NULL; int *dstRowsHost=NULL, *dstColsHost=NULL; int rowSize=0, colSize=0; if(TType==NVGRAPH_CSR_32){ nvgraphCSRTopology32I_t _refTopology = static_cast<nvgraphCSRTopology32I_t >(refTopology); nvgraphCSRTopology32I_t _dstTopology = static_cast<nvgraphCSRTopology32I_t >(dstTopology); ASSERT_EQ( _refTopology->nvertices, _dstTopology->nvertices); ASSERT_EQ( _refTopology->nedges, _dstTopology->nedges); _refRows = _refTopology->source_offsets; _refCols = _refTopology->destination_indices; _dstRows = _dstTopology->source_offsets; _dstCols = _dstTopology->destination_indices; colSize = _refTopology->nedges; rowSize = _refTopology->nvertices + 1; } else if(TType==NVGRAPH_CSC_32){ nvgraphCSCTopology32I_t _refTopology = static_cast<nvgraphCSCTopology32I_t >(refTopology); nvgraphCSCTopology32I_t _dstTopology = static_cast<nvgraphCSCTopology32I_t >(dstTopology); ASSERT_EQ( _refTopology->nvertices, _dstTopology->nvertices); ASSERT_EQ( _refTopology->nedges, _dstTopology->nedges); _refRows = _refTopology->source_indices; _refCols = _refTopology->destination_offsets; _dstRows = _dstTopology->source_indices; _dstCols = _dstTopology->destination_offsets; colSize = _refTopology->nvertices + 1; rowSize = _refTopology->nedges; } else if(TType==NVGRAPH_COO_32){ nvgraphCOOTopology32I_t _refTopology = static_cast<nvgraphCOOTopology32I_t >(refTopology); nvgraphCOOTopology32I_t _dstTopology = static_cast<nvgraphCOOTopology32I_t >(dstTopology); ASSERT_EQ( _refTopology->nvertices, _dstTopology->nvertices); ASSERT_EQ( _refTopology->nedges, _dstTopology->nedges); ASSERT_EQ( _refTopology->tag, _dstTopology->tag); _refRows = _refTopology->source_indices; _refCols = _refTopology->destination_indices; _dstRows = _dstTopology->source_indices; _dstCols = _dstTopology->destination_indices; colSize = _refTopology->nedges; rowSize = _refTopology->nedges; } else{ FAIL(); } if(refSpace==DEVICE){ refRowsHost = (int*)malloc(sizeof(int)*rowSize); refColsHost = (int*)malloc(sizeof(int)*colSize); cudaMemcpy(refRowsHost, _refRows, sizeof(int)*rowSize, cudaMemcpyDefault); cudaMemcpy(refColsHost, _refCols, sizeof(int)*colSize, cudaMemcpyDefault); } else { refRowsHost = _refRows; refColsHost = _refCols; } if(dstSpace==DEVICE){ dstRowsHost = (int*)malloc(sizeof(int)*rowSize); dstColsHost = (int*)malloc(sizeof(int)*colSize); cudaMemcpy(dstRowsHost, _dstRows, sizeof(int)*rowSize, cudaMemcpyDefault); cudaMemcpy(dstColsHost, _dstCols, sizeof(int)*colSize, 
cudaMemcpyDefault); } else { dstRowsHost = _dstRows; dstColsHost = _dstCols; } std::vector<int> refRows, refCols; std::vector<int> dstRows, dstCols; refRows.assign(refRowsHost, refRowsHost + rowSize); refCols.assign(refColsHost, refColsHost + colSize); dstRows.assign(dstRowsHost, dstRowsHost + rowSize); dstCols.assign(dstColsHost, dstColsHost + colSize); ASSERT_EQ(refRows, dstRows); ASSERT_EQ(refCols, dstCols); if(refSpace==DEVICE) { free(refRowsHost); free(refColsHost); } if(dstSpace==DEVICE){ free(dstRowsHost); free(dstColsHost); } } static nvgraphTopologyType_t testType2nvGraphType(testTopologyType_t type){ if(type==CSR_32) return NVGRAPH_CSR_32; else if(type==CSC_32) return NVGRAPH_CSC_32; else return NVGRAPH_COO_32; } static nvgraphTag_t testType2tag(testTopologyType_t type){ if(type==COO_SOURCE_32) return NVGRAPH_SORTED_BY_SOURCE; else if(type==COO_DESTINATION_32) return NVGRAPH_SORTED_BY_DESTINATION; else if(type==COO_UNSORTED_32) return NVGRAPH_UNSORTED; else return NVGRAPH_DEFAULT; } }; // Compares the convesion result from and to preset values (Used primary for simple test, and to validate reference convsrsion). class PresetTopology : public NVGraphAPIConvertTest, public ::testing::WithParamInterface<std::tr1::tuple< cudaDataType_t, // dataType testTopologyType_t, // srcTopoType testTopologyType_t, // dstTopoType presetTestContainer_st> > { // prestTestContainer public: // Reference (CPU) conversion check template <typename T> static void refPrestConvertTest(testTopologyType_t srcTestTopoType, void *srcTopology, const double *srcEdgeData, testTopologyType_t dstTestTopoType, void *refTopology, const double *refEdgeData){ int srcN=0, srcNNZ=0; int refN=0, refNNZ=0; topoGetN(srcTestTopoType, srcTopology, &srcN); topoGetNNZ(srcTestTopoType, srcTopology, &srcNNZ); topoGetN(dstTestTopoType, refTopology, &refN); topoGetNNZ(dstTestTopoType, refTopology, &refNNZ); // Allocate result Topology T *dstEdgeDataT = (T*)malloc(sizeof(T)*refNNZ); void *dstTopology=NULL; allocateTopo(&dstTopology, dstTestTopoType, refN, refNNZ, HOST); ////////////////////////////////////////////////// // Convert host edge data to template type T *srcEdgeDataT = (T*)malloc(sizeof(T)*srcNNZ); T *refEdgeDataT = (T*)malloc(sizeof(T)*refNNZ); const double *pT=(const double*)srcEdgeData; for(int i=0; i<srcNNZ; ++i) srcEdgeDataT[i]=(T)pT[i]; pT=(const double*)refEdgeData; for(int i=0; i<refNNZ; ++i) refEdgeDataT[i]=(T)pT[i]; ////////////////////////////////////////////////// nvgraphTopologyType_t srcTType, dstTType; srcTType = testType2nvGraphType(srcTestTopoType); dstTType = testType2nvGraphType(dstTestTopoType); refConvert(srcTType, srcTopology, srcEdgeDataT, dstTType, dstTopology, dstEdgeDataT); cmpTopo(dstTType, refTopology, HOST, dstTopology, HOST); cmpArray(refEdgeDataT, HOST, dstEdgeDataT, HOST, refNNZ); free(srcEdgeDataT); free(refEdgeDataT); free(dstEdgeDataT); deAllocateTopo(dstTopology, dstTestTopoType, HOST); } // nvgraph conversion test template <typename T> void nvgraphPresetConvertTest(testTopologyType_t srcTestTopoType, void *srcTopologyHst, const double *srcEdgeDataHst, cudaDataType_t *dataType, testTopologyType_t dstTestTopoType, void *refTopologyHst, const double *refEdgeDataHst){ int srcN=0, srcNNZ=0; int refN=0, refNNZ=0; topoGetN(srcTestTopoType, srcTopologyHst, &srcN); topoGetNNZ(srcTestTopoType, srcTopologyHst, &srcNNZ); topoGetN(dstTestTopoType, refTopologyHst, &refN); topoGetNNZ(dstTestTopoType, refTopologyHst, &refNNZ); // Allocate topoplogies in device memory void *srcTopologyDv=NULL, 
*dstTopologyDv=NULL; allocateTopo(&srcTopologyDv, srcTestTopoType, refN, refNNZ, DEVICE); allocateTopo(&dstTopologyDv, dstTestTopoType, refN, refNNZ, DEVICE); cpyTopo(srcTopologyDv, srcTopologyHst, srcTestTopoType, cudaMemcpyHostToDevice); // Copy src topology to device ////////////////////////////////////////////////// // Convert host edge data to template type T *srcEdgeDataHstT = (T*)malloc(sizeof(T)*srcNNZ); T *refEdgeDataHstT = (T*)malloc(sizeof(T)*refNNZ); const double *pT=(const double*)srcEdgeDataHst; for(int i=0; i<srcNNZ; ++i) srcEdgeDataHstT[i]=(T)pT[i]; pT=(const double*)refEdgeDataHst; for(int i=0; i<refNNZ; ++i) refEdgeDataHstT[i]=(T)pT[i]; ////////////////////////////////////////////////// // Allocate edge data in device memory T *srcEdgeDataDvT, *dstEdgeDataDvT; ASSERT_EQ(cudaSuccess, cudaMalloc((void**)&srcEdgeDataDvT, sizeof(T)*srcNNZ)); ASSERT_EQ(cudaSuccess, cudaMalloc((void**)&dstEdgeDataDvT, sizeof(T)*refNNZ)); ASSERT_EQ(cudaSuccess, cudaMemcpy(srcEdgeDataDvT, srcEdgeDataHstT, sizeof(T)*srcNNZ, cudaMemcpyDefault)); // Copy edge data to device ////////////////////////////////////////////////// nvgraphTopologyType_t srcTType, dstTType; srcTType = testType2nvGraphType(srcTestTopoType); dstTType = testType2nvGraphType(dstTestTopoType); status = nvgraphConvertTopology(handle, srcTType, srcTopologyDv, srcEdgeDataDvT, dataType, dstTType, dstTopologyDv, dstEdgeDataDvT); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cmpTopo(dstTType, refTopologyHst, HOST, dstTopologyDv, DEVICE); cmpArray(refEdgeDataHstT, HOST, dstEdgeDataDvT, DEVICE, refNNZ); free(srcEdgeDataHstT); free(refEdgeDataHstT); ASSERT_EQ(cudaSuccess, cudaFree(srcEdgeDataDvT)); ASSERT_EQ(cudaSuccess, cudaFree(dstEdgeDataDvT)); deAllocateTopo(srcTopologyDv, srcTestTopoType, DEVICE); deAllocateTopo(dstTopologyDv, dstTestTopoType, DEVICE); } /////////////////////////////////////////////////////////////////////////////////////////////////////// // Helper functions /////////////////////////////////////////////////////////////////////////////////////////////////////// static void getTestData(testTopologyType_t TType, void **topo, const void **edgeData, presetTestContainer_st prestTestContainer){ if(TType==CSR_32){ *topo = prestTestContainer.csrTopo; *edgeData = prestTestContainer.csrEdgeData; } else if(TType==CSC_32) { *topo = prestTestContainer.cscTopo; *edgeData = prestTestContainer.cscEdgeData; } else if(TType==COO_SOURCE_32) { *topo = prestTestContainer.coosTopo; *edgeData = prestTestContainer.coosEdgeData; } else if(TType==COO_DESTINATION_32) { *topo = prestTestContainer.coodTopo; *edgeData = prestTestContainer.coodEdgeData; } else if(TType==COO_UNSORTED_32) { *topo = prestTestContainer.coouTopo; *edgeData = prestTestContainer.coouEdgeData; } else if(TType==COO_DEFAULT_32) { *topo = prestTestContainer.coouTopo; *edgeData = prestTestContainer.coouEdgeData; } else { FAIL(); } } }; TEST_P(PresetTopology, referenceValidation) { cudaDataType_t dataType = std::tr1::get<0>(GetParam()); testTopologyType_t srcTestTopoType = std::tr1::get<1>(GetParam()); testTopologyType_t dstTestTopoType = std::tr1::get<2>(GetParam()); presetTestContainer_st prestTestContainer = std::tr1::get<3>(GetParam()); if(dstTestTopoType==COO_UNSORTED_32) return; void *srcTopology=NULL, *refTopology=NULL; const void *srcEdgeData=NULL, *refEdgeData=NULL; this->getTestData(srcTestTopoType, &srcTopology, &srcEdgeData, prestTestContainer); this->getTestData(dstTestTopoType, &refTopology, &refEdgeData, prestTestContainer); if(dataType==CUDA_R_32F) { 
this->refPrestConvertTest<float>(srcTestTopoType, srcTopology, (const double*)srcEdgeData, dstTestTopoType, refTopology, (const double*)refEdgeData); } else if (dataType==CUDA_R_64F) { this->refPrestConvertTest<double>(srcTestTopoType, srcTopology, (const double*)srcEdgeData, dstTestTopoType, refTopology, (const double*)refEdgeData); } else { FAIL(); } } TEST_P(PresetTopology, nvgraphConvertTopology) { cudaDataType_t dataType = std::tr1::get<0>(GetParam()); testTopologyType_t srcTestTopoType = std::tr1::get<1>(GetParam()); testTopologyType_t dstTestTopoType = std::tr1::get<2>(GetParam()); presetTestContainer_st prestTestContainer = std::tr1::get<3>(GetParam()); if(dstTestTopoType==COO_UNSORTED_32) return; void *srcTopology=NULL, *refTopology=NULL; const void *srcEdgeData=NULL, *refEdgeData=NULL; this->getTestData(srcTestTopoType, &srcTopology, &srcEdgeData, prestTestContainer); this->getTestData(dstTestTopoType, &refTopology, &refEdgeData, prestTestContainer); if(dataType==CUDA_R_32F){ this->nvgraphPresetConvertTest<float>( srcTestTopoType, srcTopology, (const double*)srcEdgeData, &dataType, dstTestTopoType, refTopology, (const double*)refEdgeData); } else if (dataType==CUDA_R_64F) { this->nvgraphPresetConvertTest<double>( srcTestTopoType, srcTopology, (const double*)srcEdgeData, &dataType, dstTestTopoType, refTopology, (const double*)refEdgeData); } else { FAIL(); } } class RandomTopology : public NVGraphAPIConvertTest, public ::testing::WithParamInterface<std::tr1::tuple< cudaDataType_t, // dataType testTopologyType_t, // srcTopoType testTopologyType_t, // dstTopoType int, // n int> > { // nnz public: virtual void SetUp() { NVGraphAPIConvertTest::SetUp(); } // nvgraph conversion check template <typename T> void nvgraphTopologyConvertTest(testTopologyType_t srcTestTopoType, void *srcTopologyHst, const double *srcEdgeDataHst, cudaDataType_t *dataType, testTopologyType_t dstTestTopoType){ int srcN=0, srcNNZ=0; topoGetN(srcTestTopoType, srcTopologyHst, &srcN); topoGetNNZ(srcTestTopoType, srcTopologyHst, &srcNNZ); // Allocate result space in host memory T *refResultEdgeDataT=(T*)malloc(sizeof(T)*srcNNZ); void *refResultTopologyHst=NULL; allocateTopo(&refResultTopologyHst, dstTestTopoType, srcN, srcNNZ, HOST); ////////////////////////////////////////////////// // Allocate topologies space in device memory void *srcTopologyDv=NULL, *resultTopologyDv=NULL; T *resultEdgeData=NULL; ASSERT_EQ(cudaSuccess, cudaMalloc( (void**)&resultEdgeData, sizeof(T)*srcNNZ) ); allocateTopo(&srcTopologyDv, srcTestTopoType, srcN, srcNNZ, DEVICE); allocateTopo(&resultTopologyDv, dstTestTopoType, srcN, srcNNZ, DEVICE); cpyTopo(srcTopologyDv, srcTopologyHst, srcTestTopoType, cudaMemcpyHostToDevice); // Copy src topology to device ////////////////////////////////////////////////// // Convert host edge data to template type T *srcEdgeDataHstT = (T*)malloc(sizeof(T)*srcNNZ); const double *pT=(const double*)srcEdgeDataHst; for(int i=0; i<srcNNZ; ++i) srcEdgeDataHstT[i]=(T)pT[i]; ////////////////////////////////////////////////// // Allocate edge data in device memory T *srcEdgeDataDvT, *resultEdgeDataDvT; ASSERT_EQ(cudaSuccess, cudaMalloc((void**)&srcEdgeDataDvT, sizeof(T)*srcNNZ)); ASSERT_EQ(cudaSuccess, cudaMalloc((void**)&resultEdgeDataDvT, sizeof(T)*srcNNZ)); ASSERT_EQ(cudaSuccess, cudaMemcpy(srcEdgeDataDvT, srcEdgeDataHstT, sizeof(T)*srcNNZ, cudaMemcpyDefault)); // Copy edge data to device ////////////////////////////////////////////////// nvgraphTopologyType_t srcTType, dstTType; srcTType = 
testType2nvGraphType(srcTestTopoType); dstTType = testType2nvGraphType(dstTestTopoType); refConvert(srcTType, srcTopologyHst, srcEdgeDataHstT, dstTType, refResultTopologyHst, refResultEdgeDataT); // Get reference result status = nvgraphConvertTopology(handle, srcTType, srcTopologyDv, srcEdgeDataDvT, dataType, dstTType, resultTopologyDv, resultEdgeDataDvT); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cmpTopo(dstTType, refResultTopologyHst, HOST, resultTopologyDv, DEVICE); cmpArray(refResultEdgeDataT, HOST, resultEdgeDataDvT, DEVICE, srcNNZ); free(refResultEdgeDataT); free(srcEdgeDataHstT); ASSERT_EQ(cudaSuccess, cudaFree(resultEdgeData)); ASSERT_EQ(cudaSuccess, cudaFree(srcEdgeDataDvT)); ASSERT_EQ(cudaSuccess, cudaFree(resultEdgeDataDvT)); deAllocateTopo(refResultTopologyHst, dstTestTopoType, HOST); deAllocateTopo(srcTopologyDv, srcTestTopoType, DEVICE); deAllocateTopo(resultTopologyDv, dstTestTopoType, DEVICE); } // nvgraph conversion check template <typename T> void nvgraphGraphConvertTest(testTopologyType_t srcTestTopoType, void *srcTopologyHst, const double *srcEdgeDataHst, cudaDataType_t *dataType, testTopologyType_t dstTestTopoType){ int srcN=0, srcNNZ=0; topoGetN(srcTestTopoType, srcTopologyHst, &srcN); topoGetNNZ(srcTestTopoType, srcTopologyHst, &srcNNZ); // Allocate result space in host memory T *refResultEdgeDataT=(T*)malloc(sizeof(T)*srcNNZ); void *refResultTopologyHst=NULL; allocateTopo(&refResultTopologyHst, dstTestTopoType, srcN, srcNNZ, HOST); ////////////////////////////////////////////////// // Allocate topologies space in device memory void *srcTopologyDv=NULL, *resultTopologyDv=NULL; T *resultEdgeData=NULL; ASSERT_EQ(cudaSuccess, cudaMalloc( (void**)&resultEdgeData, sizeof(T)*srcNNZ) ); allocateTopo(&srcTopologyDv, srcTestTopoType, srcN, srcNNZ, DEVICE); allocateTopo(&resultTopologyDv, dstTestTopoType, srcN, srcNNZ, DEVICE); cpyTopo(srcTopologyDv, srcTopologyHst, srcTestTopoType, cudaMemcpyHostToDevice); // Copy src topology to device ////////////////////////////////////////////////// // Convert host edge data to template type T *srcEdgeDataHstT = (T*)malloc(sizeof(T)*srcNNZ); const double *pT=(const double*)srcEdgeDataHst; for(int i=0; i<srcNNZ; ++i) srcEdgeDataHstT[i]=(T)pT[i]; ////////////////////////////////////////////////// // Allocate edge data in device memory T *srcEdgeDataDvT, *resultEdgeDataDvT; ASSERT_EQ(cudaSuccess, cudaMalloc((void**)&srcEdgeDataDvT, sizeof(T)*srcNNZ)); ASSERT_EQ(cudaSuccess, cudaMalloc((void**)&resultEdgeDataDvT, sizeof(T)*srcNNZ)); ASSERT_EQ(cudaSuccess, cudaMemcpy(srcEdgeDataDvT, srcEdgeDataHstT, sizeof(T)*srcNNZ, cudaMemcpyDefault)); // Copy edge data to device ////////////////////////////////////////////////// nvgraphTopologyType_t srcTType, dstTType; srcTType = testType2nvGraphType(srcTestTopoType); dstTType = testType2nvGraphType(dstTestTopoType); refConvert(srcTType, srcTopologyHst, srcEdgeDataHstT, dstTType, refResultTopologyHst, refResultEdgeDataT); // Get reference result status = nvgraphConvertTopology(handle, srcTType, srcTopologyDv, srcEdgeDataDvT, dataType, dstTType, resultTopologyDv, resultEdgeDataDvT); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); cmpTopo(dstTType, refResultTopologyHst, HOST, resultTopologyDv, DEVICE); cmpArray(refResultEdgeDataT, HOST, resultEdgeDataDvT, DEVICE, srcNNZ); free(refResultEdgeDataT); free(srcEdgeDataHstT); ASSERT_EQ(cudaSuccess, cudaFree(resultEdgeData)); ASSERT_EQ(cudaSuccess, cudaFree(srcEdgeDataDvT)); ASSERT_EQ(cudaSuccess, cudaFree(resultEdgeDataDvT)); deAllocateTopo(refResultTopologyHst, 
dstTestTopoType, HOST); deAllocateTopo(srcTopologyDv, srcTestTopoType, DEVICE); deAllocateTopo(resultTopologyDv, dstTestTopoType, DEVICE); } }; TEST_P(RandomTopology, nvgraphConvertTopology) { cudaDataType_t dataType = std::tr1::get<0>(GetParam()); testTopologyType_t srcTestTopoType = std::tr1::get<1>(GetParam()); testTopologyType_t dstTestTopoType = std::tr1::get<2>(GetParam()); int n = std::tr1::get<3>(GetParam()); int max_nnz = std::tr1::get<4>(GetParam()); int maxJump = (rand() % n)+1; int maxPerRow = (rand() % max_nnz)+1; int nnz; void *srcTopology; allocateTopo(&srcTopology, srcTestTopoType, n, max_nnz, HOST); if(srcTestTopoType==CSR_32) { nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t >(srcTopology); randomCsrGenerator( srcT->source_offsets, srcT->destination_indices, &nnz, n, maxPerRow, maxJump, max_nnz); srcT->nedges = nnz; } else if(srcTestTopoType==CSC_32) { nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t >(srcTopology); randomCsrGenerator( srcT->destination_offsets, srcT->source_indices, &nnz, n, maxPerRow, maxJump, max_nnz); srcT->nedges = nnz; } else if(srcTestTopoType==COO_SOURCE_32) { nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t >(srcTopology); randomCOOGenerator( srcT->source_indices, srcT->destination_indices, &nnz, n, maxPerRow, maxJump, max_nnz); srcT->nedges = nnz; } else if(srcTestTopoType==COO_DESTINATION_32 || srcTestTopoType==COO_UNSORTED_32 || srcTestTopoType==COO_DEFAULT_32) { // Unsorted and default to have COO_dest sorting. (sorted is a special case of unsorted array) nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t >(srcTopology); randomCOOGenerator( srcT->destination_indices, srcT->source_indices, &nnz, n, maxPerRow, maxJump, max_nnz); srcT->nedges = nnz; } else { FAIL(); } double *srcEdgeData = (double*)malloc(sizeof(double)*nnz); for(int i=0; i<nnz; ++i) srcEdgeData[i]=(double)rand()/(rand()+1); // don't divide by zero if(dataType==CUDA_R_32F){ this->nvgraphTopologyConvertTest<float> (srcTestTopoType, srcTopology, srcEdgeData, &dataType, dstTestTopoType); } else if (dataType==CUDA_R_64F) { this->nvgraphTopologyConvertTest<double> (srcTestTopoType, srcTopology, srcEdgeData, &dataType, dstTestTopoType); } else { FAIL(); } deAllocateTopo(srcTopology, srcTestTopoType, HOST); free(srcEdgeData); } class RandomGraph : public NVGraphAPIConvertTest, public ::testing::WithParamInterface<std::tr1::tuple< cudaDataType_t, // dataType testTopologyType_t, // srcTopoType testTopologyType_t, // dstTopoType int, // n int> > { // nnz public: nvgraphGraphDescr_t srcGrDesc, dstGrDesc, refGrDesc; void *srcEdgeData, *dstEdgeData, *refEdgeData; void *srcVertexData, *dstVertexData, *refVertexData; void *srcTopology, *refTopology; nvgraphTopologyType_t srcTopoType, dstTopoType; testTopologyType_t srcTestTopoType, dstTestTopoType; virtual void SetUp() { NVGraphAPIConvertTest::SetUp(); status = nvgraphCreateGraphDescr(handle, &srcGrDesc); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphCreateGraphDescr(handle, &dstGrDesc); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphCreateGraphDescr(handle, &refGrDesc); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); srcEdgeData = NULL; dstEdgeData = NULL; refEdgeData = NULL; srcVertexData = NULL; dstVertexData = NULL; refVertexData = NULL; srcTopology = NULL; refTopology = NULL; } virtual void TearDown() { if(srcGrDesc!=NULL){ status = nvgraphDestroyGraphDescr(handle, srcGrDesc); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } if(dstGrDesc!=NULL){ status 
= nvgraphDestroyGraphDescr(handle, dstGrDesc); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } if(refGrDesc!=NULL){ status = nvgraphDestroyGraphDescr(handle, refGrDesc); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } free(srcEdgeData); free(dstEdgeData); free(refEdgeData); free(srcVertexData); free(dstVertexData); free(refVertexData); deAllocateTopo(srcTopology, srcTestTopoType, HOST); deAllocateTopo(refTopology, dstTestTopoType, HOST); NVGraphAPIConvertTest::TearDown(); } }; TEST_P(RandomGraph, nvgraphConvertGraph) { cudaDataType_t dataType = std::tr1::get<0>(GetParam()); srcTestTopoType = std::tr1::get<1>(GetParam()); dstTestTopoType = std::tr1::get<2>(GetParam()); int n = std::tr1::get<3>(GetParam()); int max_nnz = std::tr1::get<4>(GetParam()); int maxJump = (rand() % n)+1; int maxPerRow = (rand() % max_nnz)+1; int nnz; nvgraphTopologyType_t srcTopoType, dstTopoType; srcTopoType = testType2nvGraphType(srcTestTopoType); dstTopoType = testType2nvGraphType(dstTestTopoType); /////////////////////////////////////////////////////////////////////////////////////////////////////// // Prepare input graph /////////////////////////////////////////////////////////////////////////////////////////////////////// allocateTopo(&srcTopology, srcTestTopoType, n, max_nnz, HOST); if(srcTestTopoType==CSR_32) { nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t >(srcTopology); randomCsrGenerator( srcT->source_offsets, srcT->destination_indices, &nnz, n, maxPerRow, maxJump, max_nnz); srcT->nedges = nnz; } else if(srcTestTopoType==CSC_32) { nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t >(srcTopology); randomCsrGenerator( srcT->destination_offsets, srcT->source_indices, &nnz, n, maxPerRow, maxJump, max_nnz); srcT->nedges = nnz; } else if(srcTestTopoType==COO_SOURCE_32) { nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t >(srcTopology); randomCOOGenerator( srcT->source_indices, srcT->destination_indices, &nnz, n, maxPerRow, maxJump, max_nnz); srcT->nedges = nnz; } else if(srcTestTopoType==COO_DESTINATION_32 || srcTestTopoType==COO_UNSORTED_32 || srcTestTopoType==COO_DEFAULT_32) { // Unsorted and default to have COO_dest sorting. 
(sorted is a special case of unsorted array) nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t >(srcTopology); randomCOOGenerator( srcT->destination_indices, srcT->source_indices, &nnz, n, maxPerRow, maxJump, max_nnz); srcT->nedges = nnz; } else { FAIL(); } status = nvgraphSetGraphStructure(handle, srcGrDesc, srcTopology, srcTopoType); if(srcTopoType==NVGRAPH_CSR_32 || srcTopoType==NVGRAPH_CSC_32){ ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } else if (srcTopoType==NVGRAPH_COO_32){ // COO graph is not supported ASSERT_EQ(NVGRAPH_STATUS_TYPE_NOT_SUPPORTED, status); return; } else { FAIL(); } /////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// // Prepeate data arrays /////////////////////////////////////////////////////////////////////////////////////////////////////// if(dataType==CUDA_R_32F){ srcEdgeData = malloc(sizeof(float)*nnz); dstEdgeData = malloc(sizeof(float)*nnz); refEdgeData = malloc(sizeof(float)*nnz); srcVertexData = malloc(sizeof(float)*n); dstVertexData = malloc(sizeof(float)*n); refVertexData = malloc(sizeof(float)*n); } else if (dataType==CUDA_R_64F){ srcEdgeData = malloc(sizeof(double)*nnz); dstEdgeData = malloc(sizeof(double)*nnz); refEdgeData = malloc(sizeof(double)*nnz); srcVertexData = malloc(sizeof(double)*n); dstVertexData = malloc(sizeof(double)*n); refVertexData = malloc(sizeof(double)*n); } else FAIL(); if(srcEdgeData==NULL || dstEdgeData==NULL || refEdgeData==NULL) FAIL(); if(srcVertexData==NULL || dstVertexData==NULL || refVertexData==NULL) FAIL(); /////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// // Prepare reference graph /////////////////////////////////////////////////////////////////////////////////////////////////////// allocateTopo(&refTopology, dstTestTopoType, n, nnz, HOST); if(dataType==CUDA_R_32F) refConvert( srcTopoType, srcTopology, (float*)srcEdgeData, dstTopoType, refTopology, (float*)refEdgeData ); // We don't care about edgeData else if (dataType==CUDA_R_64F) refConvert( srcTopoType, srcTopology, (double*)srcEdgeData, dstTopoType, refTopology, (double*)refEdgeData ); // We don't care about edgeData else FAIL(); status = nvgraphSetGraphStructure(handle, refGrDesc, refTopology, dstTopoType); if( dstTopoType==NVGRAPH_CSR_32 || dstTopoType==NVGRAPH_CSC_32){ ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } else if (dstTopoType==NVGRAPH_COO_32) { // We don't support COO graphs ASSERT_EQ(NVGRAPH_STATUS_TYPE_NOT_SUPPORTED, status); return; } else { FAIL(); } /////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// // Fill graph with vertex and edge data /////////////////////////////////////////////////////////////////////////////////////////////////////// size_t edgeDataDim = (rand() % 11); // up to 10 edgeData sets std::vector<cudaDataType_t> edgeDataType(edgeDataDim); std::fill (edgeDataType.begin(), edgeDataType.end(), dataType); status = nvgraphAllocateEdgeData( handle, srcGrDesc, edgeDataDim, edgeDataType.data()); if(edgeDataDim==0) ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); else ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData( handle, refGrDesc, 
edgeDataDim, edgeDataType.data()); if(edgeDataDim==0) ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); else ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for(size_t i=0; i<edgeDataDim; ++i){ randomArray(nnz, srcEdgeData, &dataType); // src Graph status = nvgraphSetEdgeData(handle, srcGrDesc, srcEdgeData, i); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // ref Graph (not the fastest approach, but I'm too lazy to do the permutation approach) if(dataType==CUDA_R_32F) refConvert( srcTopoType, srcTopology, (float*)srcEdgeData, dstTopoType, refTopology, (float*)refEdgeData ); else if (dataType==CUDA_R_64F) refConvert( srcTopoType, srcTopology, (double*)srcEdgeData, dstTopoType, refTopology, (double*)refEdgeData ); else FAIL(); status = nvgraphSetEdgeData(handle, refGrDesc, refEdgeData, i); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } size_t vertexDataDim = (rand() % 6); // up to 5 vertexData sets std::vector<cudaDataType_t> vertexDataType(vertexDataDim); std::fill (vertexDataType.begin(), vertexDataType.end(), dataType); status = nvgraphAllocateVertexData( handle, srcGrDesc, vertexDataDim, vertexDataType.data()); if(vertexDataDim==0) ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); else ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateVertexData( handle, refGrDesc, vertexDataDim, vertexDataType.data()); if(vertexDataDim==0) ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); else ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); for(size_t i=0; i<vertexDataDim; ++i){ randomArray(n, srcVertexData, &dataType); // src Graph status = nvgraphSetVertexData(handle, srcGrDesc, srcVertexData, i); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, refGrDesc, srcVertexData, i); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } /////////////////////////////////////////////////////////////////////////////////////////////////////// // Convert Graph status = nvgraphConvertGraph(handle, srcGrDesc, dstGrDesc, dstTopoType); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // /////////////////////////////////////////////////////////////////////////////////////////////////////// // // Compare // /////////////////////////////////////////////////////////////////////////////////////////////////////// int ref_nvertices, ref_nedges, dst_nvertices, dst_nedges; int *dstOffset, *dstInd, *refOffset, *refInd; if(dataType==CUDA_R_32F){ nvgraph::MultiValuedCsrGraph<int, float> *refMCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*> (refGrDesc->graph_handle); ref_nvertices = static_cast<int>(refMCSRG->get_num_vertices()); ref_nedges = static_cast<int>(refMCSRG->get_num_edges()); refOffset = refMCSRG->get_raw_row_offsets(); refInd = refMCSRG->get_raw_column_indices(); nvgraph::MultiValuedCsrGraph<int, float> *dstMCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*> (dstGrDesc->graph_handle); dst_nvertices = static_cast<int>(dstMCSRG->get_num_vertices()); dst_nedges = static_cast<int>(dstMCSRG->get_num_edges()); dstOffset = dstMCSRG->get_raw_row_offsets(); dstInd = dstMCSRG->get_raw_column_indices(); } else if (dataType==CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *refMCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*> (refGrDesc->graph_handle); ref_nvertices = static_cast<int>(refMCSRG->get_num_vertices()); ref_nedges = static_cast<int>(refMCSRG->get_num_edges()); refOffset = refMCSRG->get_raw_row_offsets(); refInd = refMCSRG->get_raw_column_indices(); nvgraph::MultiValuedCsrGraph<int, double> *dstMCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*> 
(dstGrDesc->graph_handle); dst_nvertices = static_cast<int>(dstMCSRG->get_num_vertices()); dst_nedges = static_cast<int>(dstMCSRG->get_num_edges()); dstOffset = dstMCSRG->get_raw_row_offsets(); dstInd = dstMCSRG->get_raw_column_indices(); } else FAIL(); ASSERT_EQ(ref_nvertices, dst_nvertices); ASSERT_EQ(ref_nedges, dst_nedges); cmpArray(refOffset, DEVICE, dstOffset, DEVICE, n+1); cmpArray(refInd, DEVICE, dstInd, DEVICE, nnz); for(size_t i=0; i<edgeDataDim; ++i){ status = nvgraphGetEdgeData(handle, refGrDesc, refEdgeData, i); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetEdgeData(handle, dstGrDesc, dstEdgeData, i); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); if(dataType==CUDA_R_32F) cmpArray((float*)refEdgeData, HOST, (float*)dstEdgeData, HOST, nnz); else if (dataType==CUDA_R_64F) cmpArray((double*)refEdgeData, HOST, (double*)dstEdgeData, HOST, nnz); else FAIL(); } for(size_t i=0; i<vertexDataDim; ++i){ status = nvgraphGetVertexData(handle, refGrDesc, refVertexData, i); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(handle, dstGrDesc, dstVertexData, i); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); if(dataType==CUDA_R_32F) cmpArray((float*)refVertexData, HOST, (float*)dstVertexData, HOST, n); else if (dataType==CUDA_R_64F) cmpArray((double*)refVertexData, HOST, (double*)dstVertexData, HOST, n); else FAIL(); } } cudaDataType_t DATA_TYPES[] = {CUDA_R_32F, CUDA_R_64F}; testTopologyType_t SRC_TOPO_TYPES[] = {CSR_32, CSC_32, COO_SOURCE_32, COO_DESTINATION_32, COO_UNSORTED_32}; testTopologyType_t DST_TOPO_TYPES[] = {CSR_32, CSC_32, COO_SOURCE_32, COO_DESTINATION_32, COO_UNSORTED_32}; int ns[] = {10, 100, 1000, 50000, 100000, 200000, 300000, 456179, 500000, 1000000}; int nnzs[] = {10, 100, 1000, 25000, 28943, 50000, 100000, 200000}; INSTANTIATE_TEST_CASE_P(PresetTopologyConvertTest, PresetTopology, ::testing::Combine( ::testing::ValuesIn(DATA_TYPES), // dataType ::testing::ValuesIn(SRC_TOPO_TYPES), // srcTopoType ::testing::ValuesIn(DST_TOPO_TYPES), // dstTopoType ::testing::ValuesIn(presetTests) // testData )); INSTANTIATE_TEST_CASE_P(RandomTopologyConvertTest, RandomTopology, ::testing::Combine( ::testing::ValuesIn(DATA_TYPES), // dataType ::testing::ValuesIn(SRC_TOPO_TYPES), // srcTopoType ::testing::ValuesIn(DST_TOPO_TYPES), // dstTopoType ::testing::ValuesIn(ns), // n ::testing::ValuesIn(nnzs) // nnz )); INSTANTIATE_TEST_CASE_P(RandomGraphConvertTest, RandomGraph, ::testing::Combine( ::testing::ValuesIn(DATA_TYPES), // dataType ::testing::ValuesIn(SRC_TOPO_TYPES), // srcTopoType ::testing::ValuesIn(DST_TOPO_TYPES), // dstTopoType ::testing::ValuesIn(ns), // n ::testing::ValuesIn(nnzs) // nnz )); int main(int argc, char **argv){ ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
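// Minimal usage sketch (comments only, not compiled with the tests): converting a
// device-resident CSR topology to COO via nvgraphConvertTopology, mirroring what
// nvgraphPresetConvertTest does above. Allocation, copies, and error checks are elided,
// and every d_* name is an assumed pre-allocated device buffer.
//
//   nvgraphHandle_t h;
//   nvgraphCreate(&h);
//   nvgraphCSRTopology32I_st src = {n, nnz, d_row_offsets, d_col_indices};
//   nvgraphCOOTopology32I_st dst = {n, nnz, d_coo_rows, d_coo_cols, NVGRAPH_SORTED_BY_SOURCE};
//   cudaDataType_t t = CUDA_R_32F;
//   nvgraphConvertTopology(h, NVGRAPH_CSR_32, &src, d_src_vals,
//                          &t, NVGRAPH_COO_32, &dst, d_dst_vals);
//   nvgraphDestroy(h);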
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_subgraph.cpp
#include <iostream> #include <vector> #include <algorithm> #include <functional> #include <iterator> #include <fstream> #include <cassert> #include <sstream> #include <string> #include <cstdio> #include "gtest/gtest.h" #include "valued_csr_graph.hxx" #include "nvgraphP.h" #include "nvgraph.h" static std::string ref_data_prefix = ""; static std::string graph_data_prefix = ""; std::string convert_to_local_path(const std::string& in_file) { std::string wstr = in_file; if ((wstr != "dummy") & (wstr != "")) { std::string prefix; if (graph_data_prefix.length() > 0) { prefix = graph_data_prefix; } else { #ifdef _WIN32 //prefix = "C:\\mnt\\eris\\test\\matrices_collection\\"; prefix = "Z:\\matrices_collection\\"; std::replace(wstr.begin(), wstr.end(), '/', '\\'); #else prefix = "/mnt/nvgraph_test_data/"; #endif } wstr = prefix + wstr; } return wstr; } //annonymus: namespace { class file_read_error { public: file_read_error(const std::string& msg) : msg_(msg) { msg_ = std::string("File read error: ") + msg; } ~file_read_error() { } const std::string& what() const { return (msg_); } private: std::string msg_; }; template<typename Vector> void fill_extraction_data(const std::string& fname, Vector& g_row_offsets, Vector& g_col_indices, Vector& aggregates, Vector& cg_row_offsets, Vector& cg_col_indices) { typedef typename Vector::value_type T; std::ifstream m_stream(fname.c_str(), std::ifstream::in); std::string line; if (!m_stream.is_open()) { throw file_read_error(fname); } bool keep_going = !std::getline(m_stream, line).eof(); //debug: //std::cout<<line<<std::endl; if (!keep_going) return; char c; int g_nrows = 0; int g_nnz = 0; std::sscanf(line.c_str(), "%c: nrows=%d, nnz=%d", &c, &g_nrows, &g_nnz); //debug: //std::cout<<c<<","<<g_nrows<<","<<g_nnz<<"\n"; int n_entries = g_nrows + 1; g_row_offsets.reserve(n_entries); //ignore next line: // if (!std::getline(m_stream, line)) return; //read G row_offsets: for (int i = 0; (i < n_entries) && keep_going; ++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; g_row_offsets.push_back(value); } //ignore next 2 lines: // if (!std::getline(m_stream, line) || !std::getline(m_stream, line)) return; g_col_indices.reserve(g_nnz); //read G col_indices: for (int i = 0; (i < g_nnz) && keep_going; ++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; g_col_indices.push_back(value); } //ignore next line: // if (!std::getline(m_stream, line)) return; //remove the following for extraction: //{ if (!std::getline(m_stream, line)) return; int n_aggs = 0; std::sscanf(line.c_str(), "aggregate: size=%d", &n_aggs); //assert( n_aggs == g_nrows );//not true for subgraph extraction! 
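// For orientation, the layout this parser walks through (inferred from the reads in this
// function; every numeric value sits on its own line):
//   line 1: "<c>: nrows=<N>, nnz=<M>"   (header of the input graph G)
//   next N+1 lines: row offsets of G
//   next M lines: column indices of G
//   next line: "aggregate: size=<K>", followed by K aggregate/vertex ids
//   next line: "result <c>: nrows=<n>, nnz=<m>"   (header of the expected result)
//   next n+1 lines and m lines: row offsets and column indices of the expected result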
aggregates.reserve(n_aggs); //read aggregate: for (int i = 0; (i < n_aggs) && keep_going; ++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; aggregates.push_back(value); } //} end remove code for extraction if (!keep_going || !std::getline(m_stream, line)) return; int cg_nrows = 0; int cg_nnz = 0; std::sscanf(line.c_str(), "result %c: nrows=%d, nnz=%d", &c, &cg_nrows, &cg_nnz); //debug: //std::cout<<c<<","<<cg_nrows<<","<<cg_nnz<<"\n"; // //m_stream.close();//not really needed...destructor handles this //return; n_entries = cg_nrows + 1; cg_row_offsets.reserve(n_entries); //ignore next line: // if (!std::getline(m_stream, line)) return; //read G row_offsets: for (int i = 0; (i < n_entries) && keep_going; ++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; cg_row_offsets.push_back(value); } //ignore next 2 lines: // if (!std::getline(m_stream, line) || !std::getline(m_stream, line)) return; cg_col_indices.reserve(cg_nnz); //read G col_indices: for (int i = 0; (i < cg_nnz) && keep_going; ++i) { T value(0); keep_going = !std::getline(m_stream, line).eof(); std::stringstream ss(line); ss >> value; cg_col_indices.push_back(value); } m_stream.close(); //not really needed...destructor handles this } template<typename Vector> bool check_diffs(const Vector& v1, const Vector& v2) { typedef typename Vector::value_type T; Vector v(v1.size(), 0); std::transform(v1.begin(), v1.end(), v2.begin(), v.begin(), std::minus<T>()); if (std::find_if(v.begin(), v.end(), std::bind2nd(std::not_equal_to<T>(), 0)) != v.end()) return true; else return false; } //check if sort(delta(r1)) == sort(delta(r2)) //where delta(r)={r[i+1]-r[i] | i <- [0..|r|-1]} // template<typename Vector> bool check_delta_invariant(const Vector& r1, const Vector& r2) { typedef typename Vector::value_type T; size_t sz = r1.size(); assert(sz == r2.size()); Vector d1(sz - 1); std::transform(r1.begin() + 1, r1.end(), r1.begin(), d1.begin(), std::minus<int>()); Vector d2(sz - 1); std::transform(r2.begin() + 1, r2.end(), r2.begin(), d2.begin(), std::minus<int>()); std::sort(d1.begin(), d1.end()); std::sort(d2.begin(), d2.end()); return (d1 == d2); } } class NvgraphCAPITests_SubgraphCSR: public ::testing::Test { public: NvgraphCAPITests_SubgraphCSR() : nvgraph_handle(NULL), initial_graph(NULL) { } protected: static void SetupTestCase() { } static void TearDownTestCase() { } virtual void SetUp() { if (nvgraph_handle == NULL) { status = nvgraphCreate(&nvgraph_handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // set up graph status = nvgraphCreateGraphDescr(nvgraph_handle, &initial_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st topoData; topoData.nvertices = 5; topoData.nedges = 9; int neighborhood[] = { 0, 2, 3, 5, 7, 9 }; int edgedest[] = { 1, 3, 3, 1, 4, 0, 2, 2, 4 }; topoData.source_offsets = neighborhood; topoData.destination_indices = edgedest; status = nvgraphSetGraphStructure( nvgraph_handle, initial_graph, (void*) &topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph data size_t numsets = 2; float vertexvals0[] = { 0.1f, 0.15893e-20f, 1e27f, 13.2f, 0.f }; float vertexvals1[] = { 13., 322.64, 1e28, -1.4, 22.3 }; void* vertexptr[] = { (void*) vertexvals0, (void*) vertexvals1 }; cudaDataType_t type_v[] = { CUDA_R_32F, CUDA_R_32F }; float edgevals0[] = { 0.1f, 0.9153e-20f, 0.42e27f, 185.23, 1e21f, 15.6f, 215.907f, 912.2f, 0.2f }; float edgevals1[] = { 13., 322.64, 
1e28, 197534.2, 0.1, 0.425e-5, 5923.4, 0.12e-12, 52. }; void* edgeptr[] = { (void*) edgevals0, (void*) edgevals1 }; cudaDataType_t type_e[] = { CUDA_R_32F, CUDA_R_32F }; status = nvgraphAllocateVertexData(nvgraph_handle, initial_graph, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(nvgraph_handle, initial_graph, (void *) vertexptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(nvgraph_handle, initial_graph, (void *) vertexptr[1], 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(nvgraph_handle, initial_graph, numsets, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(nvgraph_handle, initial_graph, (void *) edgeptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(nvgraph_handle, initial_graph, (void *) edgeptr[1], 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //save data - those will be available in the tests directly graph_neigh.assign(neighborhood, neighborhood + topoData.nvertices + 1); graph_edged.assign(edgedest, edgedest + topoData.nedges); graph_vvals0.assign(vertexvals0, vertexvals0 + topoData.nvertices); graph_vvals1.assign(vertexvals1, vertexvals1 + topoData.nvertices); graph_evals0.assign(edgevals0, edgevals0 + topoData.nedges); graph_evals1.assign(edgevals1, edgevals1 + topoData.nedges); } virtual void TearDown() { // destroy graph status = nvgraphDestroyGraphDescr(nvgraph_handle, initial_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // release library if (nvgraph_handle != NULL) { status = nvgraphDestroy(nvgraph_handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraph_handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t nvgraph_handle; nvgraphGraphDescr_t initial_graph; std::vector<int> graph_neigh; std::vector<int> graph_edged; std::vector<float> graph_vvals0; std::vector<float> graph_vvals1; std::vector<float> graph_evals0; std::vector<float> graph_evals1; }; class NvgraphCAPITests_SubgCSR_Isolated: public ::testing::Test { public: NvgraphCAPITests_SubgCSR_Isolated() : nvgraph_handle(NULL), initial_graph(NULL) { } protected: static void SetupTestCase() { } static void TearDownTestCase() { } virtual void SetUp() { if (nvgraph_handle == NULL) { status = nvgraphCreate(&nvgraph_handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // set up graph status = nvgraphCreateGraphDescr(nvgraph_handle, &initial_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /* * here is the graph we'll test with: * 0 -> 2 * 1 -> 3 * * Extracting the subgraph that uses vertices 0, 1, 3 will get * a graph with 3 vertices and 1 edge... and that edge won't * use vertex id 0. Pre bug fix the resulting graph is a single * edge: 0 -> 3 which does not even exist in the original graph. 
*/ nvgraphCSRTopology32I_st topoData; std::vector<int> v_neighborhood { 0, 1, 2, 2, 2 }; std::vector<int> v_edgedest{ 2, 3 }; topoData.nvertices = v_neighborhood.size(); topoData.nedges = v_edgedest.size(); topoData.source_offsets = v_neighborhood.data(); topoData.destination_indices = v_edgedest.data(); status = nvgraphSetGraphStructure( nvgraph_handle, initial_graph, (void*) &topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); graph_neigh = v_neighborhood; graph_edged = v_edgedest; } virtual void TearDown() { // destroy graph status = nvgraphDestroyGraphDescr(nvgraph_handle, initial_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // release library if (nvgraph_handle != NULL) { status = nvgraphDestroy(nvgraph_handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraph_handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t nvgraph_handle; nvgraphGraphDescr_t initial_graph; std::vector<int> graph_neigh; std::vector<int> graph_edged; }; TEST_F(NvgraphCAPITests_SubgCSR_Isolated, CSRSubgraphVertices_Bug60) { nvgraphStatus_t status; nvgraphGraphDescr_t temp_graph2 = NULL; { status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //int vertices[] = { 0, 1, 17 }; int vertices[] = { 0, 1, 3 }; status = nvgraphExtractSubgraphByVertex(nvgraph_handle, initial_graph, temp_graph2, vertices, sizeof(vertices) / sizeof(vertices[0])); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; int tData_source_offsets[3], tData_destination_indices[3]; tData.source_offsets = tData_source_offsets; tData.destination_indices = tData_destination_indices; nvgraphTopologyType_t TT; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph2, (void*) &tData, &TT); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(TT, NVGRAPH_CSR_32); ASSERT_EQ(tData.nvertices, 3); ASSERT_EQ(tData.nedges, 1); // check structure ASSERT_EQ(tData.source_offsets[0], 0); ASSERT_EQ(tData.source_offsets[1], 0); ASSERT_EQ(tData.source_offsets[2], 1); ASSERT_EQ(tData.source_offsets[3], 1); ASSERT_EQ(tData.destination_indices[0], 2); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } TEST_F(NvgraphCAPITests_SubgraphCSR, CSRSubgraphVertices_Sanity) { nvgraphStatus_t status; nvgraphGraphDescr_t temp_graph1 = NULL, temp_graph2 = NULL; float getVvals0[4]; float getVvals1[4]; float getEvals0[4]; float getEvals1[4]; { status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int vertices[] = { 2, 4 }; status = nvgraphExtractSubgraphByVertex( nvgraph_handle, initial_graph, temp_graph2, vertices, 2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; int tData_source_offsets[3], tData_destination_indices[3]; tData.source_offsets = tData_source_offsets; tData.destination_indices = tData_destination_indices; nvgraphTopologyType_t TT; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph2, (void*) &tData, &TT); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(TT, NVGRAPH_CSR_32); ASSERT_EQ(tData.nvertices, 2); ASSERT_EQ(tData.nedges, 3); status = nvgraphGetVertexData(nvgraph_handle, temp_graph2, (void *) getVvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(nvgraph_handle, temp_graph2, (void *) getVvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph2, (void *) getEvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = 
nvgraphGetEdgeData(nvgraph_handle, temp_graph2, (void *) getEvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // we are extracting two vertices, but we are not sure which of them will be #0 and which will be #1 // we are comparing vertex values to determine that and handle both cases if (getVvals0[0] == graph_vvals0[vertices[0]]) //vertex #0 in new graph - vertex #2 in old graph //vertex #1 in new graph - vertex #4 in old graph { // check that vertex values are extracted correctly ASSERT_EQ(getVvals0[0], graph_vvals0[vertices[0]]); ASSERT_EQ(getVvals1[0], graph_vvals1[vertices[0]]); ASSERT_EQ(getVvals0[1], graph_vvals0[vertices[1]]); ASSERT_EQ(getVvals1[1], graph_vvals1[vertices[1]]); // check that edge values are extracted correctly ASSERT_EQ(getEvals0[0], graph_evals0[4]); ASSERT_EQ(getEvals0[1], graph_evals0[7]); ASSERT_EQ(getEvals0[2], graph_evals0[8]); ASSERT_EQ(getEvals1[0], graph_evals1[4]); ASSERT_EQ(getEvals1[1], graph_evals1[7]); ASSERT_EQ(getEvals1[2], graph_evals1[8]); // Check structure ASSERT_EQ(tData.source_offsets[0], 0); ASSERT_EQ(tData.source_offsets[1], 1); ASSERT_EQ(tData.source_offsets[2], 3); ASSERT_EQ(tData.destination_indices[0], 1); ASSERT_EQ(tData.destination_indices[1], 0); ASSERT_EQ(tData.destination_indices[2], 1); } //vertex #0 in new graph - vertex #4 in old graph //vertex #1 in new graph - vertex #2 in old graph else { // check that vertex values are extracted correctly ASSERT_EQ(getVvals0[0], graph_vvals0[vertices[1]]); ASSERT_EQ(getVvals0[1], graph_vvals0[vertices[0]]); ASSERT_EQ(getVvals1[0], graph_vvals1[vertices[1]]); ASSERT_EQ(getVvals1[1], graph_vvals1[vertices[0]]); // check that edge values are extracted correctly ASSERT_EQ(getEvals0[0], graph_evals0[7]); ASSERT_EQ(getEvals0[1], graph_evals0[8]); ASSERT_EQ(getEvals0[2], graph_evals0[4]); ASSERT_EQ(getEvals1[0], graph_evals1[7]); ASSERT_EQ(getEvals1[1], graph_evals1[8]); ASSERT_EQ(getEvals1[2], graph_evals1[4]); // check structure ASSERT_EQ(tData.source_offsets[0], 0); ASSERT_EQ(tData.source_offsets[1], 2); ASSERT_EQ(tData.source_offsets[2], 3); ASSERT_EQ(tData.destination_indices[0], 0); ASSERT_EQ(tData.destination_indices[1], 1); ASSERT_EQ(tData.destination_indices[2], 0); } status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } //@TODO: how to check extracting by multiple vertices? do we preserve order of vertices/edges? 
//@TODO: this would make sense only if vertices order is perserved in the extracted subgraph int vertices[4] = { 0, 1, 3, 4 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphExtractSubgraphByVertex(nvgraph_handle, initial_graph, temp_graph1, vertices, 3); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /*size_t nverts1 = 0, nedges1 = 0; int neighborget[5]; ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(nverts1, 4); status = nvgraphGetGraphNedges(nvgraph_handle, temp_graph1, &nedges1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(nedges1, 4); // check structure: status = nvgraphGetGraphNeighborhood(nvgraph_handle, temp_graph1, neighborget); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(neighborget[0], 0); ASSERT_EQ(neighborget[1], 2); ASSERT_EQ(neighborget[2], 3); ASSERT_EQ(neighborget[3], 4); ASSERT_EQ(neighborget[4], 4); int edgeget[4]; status = nvgraphGetGraphEdgeDest( nvgraph_handle, temp_graph1, edgeget); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(edgeget[0], 1); ASSERT_EQ(edgeget[1], 3); ASSERT_EQ(edgeget[2], 3); ASSERT_EQ(edgeget[3], 0); // check values status = nvgraphGetVertexData(nvgraph_handle, temp_graph1, (void *)getVvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getVvals0[0], vertexvals0[vertices[0]]); ASSERT_EQ(getVvals0[1], vertexvals0[vertices[1]]); ASSERT_EQ(getVvals0[2], vertexvals0[vertices[2]]); ASSERT_EQ(getVvals0[3], vertexvals0[vertices[3]]); status = nvgraphGetVertexData(nvgraph_handle, temp_graph1, (void *)getVvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getVvals1[0], vertexvals1[vertices[0]]); ASSERT_EQ(getVvals1[1], vertexvals1[vertices[1]]); ASSERT_EQ(getVvals1[2], vertexvals1[vertices[2]]); ASSERT_EQ(getVvals1[3], vertexvals1[vertices[3]]); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph1, (void *)getEvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getEvals0[0], edgevals0[0]); ASSERT_EQ(getEvals0[1], edgevals0[1]); ASSERT_EQ(getEvals0[2], edgevals0[2]); ASSERT_EQ(getEvals0[3], edgevals0[6]); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph1, (void *)getEvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getEvals1[0], edgevals1[0]); ASSERT_EQ(getEvals1[1], edgevals1[1]); ASSERT_EQ(getEvals1[2], edgevals1[2]); ASSERT_EQ(getEvals1[3], edgevals1[6]);*/ status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphCAPITests_SubgraphCSR, CSRSubgraphVertices_CornerCases) { nvgraphStatus_t status; nvgraphGraphDescr_t temp_graph1 = NULL, temp_graph2 = NULL; float getVvals0[4]; float getVvals1[4]; float getEvals0[4]; float getEvals1[4]; // failures { int vertices[2] = { 1, 3 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // bad library nvgraph_handle status = nvgraphExtractSubgraphByEdge(NULL, initial_graph, temp_graph1, vertices, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // bad descriptor 1 status = nvgraphExtractSubgraphByEdge(nvgraph_handle, temp_graph2, temp_graph1, vertices, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // bad descriptor 2 status = nvgraphExtractSubgraphByEdge( nvgraph_handle, initial_graph, temp_graph2, vertices, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // NULL pointer status = nvgraphExtractSubgraphByEdge( nvgraph_handle, initial_graph, temp_graph1, (int*) NULL, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // 
extract zero vertices - failure expected status = nvgraphExtractSubgraphByVertex( nvgraph_handle, initial_graph, temp_graph1, vertices, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // extracting vertices more than in original graph - failure expected int too_many_vertices[] = { 0, 1, 2, 3, 4, 5, 10, 15 }; status = nvgraphExtractSubgraphByVertex( nvgraph_handle, initial_graph, temp_graph1, too_many_vertices, 8); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // unexisting indices - failure expected int bad_vertices[] = { -1, 2, 15 }; status = nvgraphExtractSubgraphByVertex( nvgraph_handle, initial_graph, temp_graph1, bad_vertices, 3); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // Not connected vertices { int vertices[] = { 0, 2 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphExtractSubgraphByVertex( nvgraph_handle, initial_graph, temp_graph1, vertices, 2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; tData.source_offsets = NULL; tData.destination_indices = NULL; nvgraphTopologyType_t TT; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph1, (void*) &tData, &TT); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(TT, NVGRAPH_CSR_32); ASSERT_EQ(tData.nvertices, 2); ASSERT_EQ(tData.nedges, 0); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // extract vertex that has edge to itself { int vertices[] = { 4 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphExtractSubgraphByVertex( nvgraph_handle, initial_graph, temp_graph1, vertices, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; tData.source_offsets = NULL; tData.destination_indices = NULL; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph1, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(tData.nvertices, 1); ASSERT_EQ(tData.nedges, 1); status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph1, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(nvgraph_handle, temp_graph1, (void *) getVvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(nvgraph_handle, temp_graph1, (void *) getVvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph1, (void *) getEvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph1, (void *) getEvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getVvals0[0], graph_vvals0[vertices[0]]); ASSERT_EQ(getVvals1[0], graph_vvals1[vertices[0]]); ASSERT_EQ(getEvals0[0], graph_evals0[graph_evals0.size() - 1]); ASSERT_EQ(getEvals1[0], graph_evals1[graph_evals0.size() - 1]); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // extract whole graph { int vertices[] = { 0, 1, 2, 3, 4 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphExtractSubgraphByVertex( nvgraph_handle, initial_graph, temp_graph1, vertices, 5); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; tData.source_offsets = NULL; tData.destination_indices = NULL; status = 
nvgraphGetGraphStructure(nvgraph_handle, temp_graph1, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(tData.nvertices, (int )graph_vvals0.size()); ASSERT_EQ(tData.nedges, (int )graph_evals0.size()); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } TEST_F(NvgraphCAPITests_SubgraphCSR, CSRSubgraphEdges_Sanity) { nvgraphStatus_t status; nvgraphGraphDescr_t temp_graph1 = NULL, temp_graph2 = NULL; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); float getVvals0[4]; float getVvals1[4]; float getEvals0[4]; float getEvals1[4]; // for all edges: try to extract graph using only 1 edge { for (int r = 0; r < (int) graph_vvals0.size() /* == nvertices */; r++) { for (int e = graph_neigh[r]; e < graph_neigh[r + 1]; e++) { status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphExtractSubgraphByEdge( nvgraph_handle, initial_graph, temp_graph2, &e, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; tData.source_offsets = NULL; tData.destination_indices = NULL; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph2, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(nvgraph_handle, temp_graph2, (void *) getVvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(nvgraph_handle, temp_graph2, (void *) getVvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph2, (void *) getEvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph2, (void *) getEvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // check structure - should always be 1 edge and 2 vertices, special case for the last edge, because it is from vertex #5 to itself if (e != (int) graph_evals0.size() - 1) { // check structure ASSERT_EQ(tData.nvertices, 2)<< "Row : " << r << ", Edge : " << e; ASSERT_EQ(tData.nedges, 1) << "Row : " << r << ", Edge : " << e; // check vertex data ASSERT_TRUE((getVvals0[0] == graph_vvals0[r]) || (getVvals0[0] == graph_vvals0[graph_edged[e]])) << getVvals0[0] << " " << graph_vvals0[r] << " " << graph_vvals0[graph_edged[e]]; ASSERT_TRUE((getVvals0[1] == graph_vvals0[r]) || (getVvals0[1] == graph_vvals0[graph_edged[e]])) << getVvals0[1] << " " << graph_vvals0[r] << " " << graph_vvals0[graph_edged[e]]; ASSERT_TRUE(getVvals0[0] != getVvals0[1]) << getVvals0[0] << " " << getVvals0[1]; ASSERT_TRUE((getVvals1[0] == graph_vvals1[r]) || (getVvals1[0] == graph_vvals1[graph_edged[e]])) << getVvals1[0] << " " << graph_vvals1[r] << " " << graph_vvals1[graph_edged[e]]; ASSERT_TRUE((getVvals1[1] == graph_vvals1[r]) || (getVvals1[1] == graph_vvals1[graph_edged[e]])) << getVvals1[1] << " " << graph_vvals1[r] << " " << graph_vvals1[graph_edged[e]]; ASSERT_TRUE(getVvals1[0] != getVvals1[1]) << getVvals1[0] << " " << getVvals1[1]; } else // special case for the last edge - from last vertex to itself { // check structure ASSERT_EQ(tData.nvertices, 1) << "Row : " << r << ", Edge : " << e; ASSERT_EQ(tData.nedges, 1) << "Row : " << r << ", Edge : " << e; // check vertex data ASSERT_TRUE(getVvals0[0] == graph_vvals0[r]) << getVvals0[0] << " " << graph_vvals0[r]; ASSERT_TRUE(getVvals1[0] == graph_vvals1[r]) << getVvals1[0] << " " << graph_vvals1[r]; } // check edge data ASSERT_EQ(getEvals0[0], graph_evals0[e])<< getEvals0[0] 
<< " " << graph_evals0[e]; ASSERT_EQ(getEvals1[0], graph_evals1[e])<< getEvals1[0] << " " << graph_evals1[e]; status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } } //@TODO: we need somehow check extraction by multiple edges //@TODO: this would make sense only if vertices order is perserved in the extracted subgraph int edges[2] = { 1, 3 }; status = nvgraphExtractSubgraphByEdge(nvgraph_handle, initial_graph, temp_graph1, edges, 2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /*size_t nverts1 = 0, nedges1 = 0; status = nvgraphGetGraphNvertices(nvgraph_handle, temp_graph1, &nverts1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(nverts1, 3); status = nvgraphGetGraphNedges(nvgraph_handle, temp_graph1, &nedges1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(nedges1, 2); // check structure: int neighborget[4]; status = nvgraphGetGraphNeighborhood(nvgraph_handle, temp_graph1, neighborget); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(neighborget[0], 0); ASSERT_EQ(neighborget[1], 1); ASSERT_EQ(neighborget[2], 2); ASSERT_EQ(neighborget[3], 2); int edgeget[2]; status = nvgraphGetGraphEdgeDest( nvgraph_handle, temp_graph1, edgeget); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(edgeget[0], 2); ASSERT_EQ(edgeget[1], 0); status = nvgraphGetVertexData(nvgraph_handle, temp_graph1, (void *)getVvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getVvals0[0], vertexvals0[0]); ASSERT_EQ(getVvals0[1], vertexvals0[2]); ASSERT_EQ(getVvals0[2], vertexvals0[3]); status = nvgraphGetVertexData(nvgraph_handle, temp_graph1, (void *)getVvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getVvals1[0], vertexvals1[0]); ASSERT_EQ(getVvals1[1], vertexvals1[2]); ASSERT_EQ(getVvals1[2], vertexvals1[3]); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph1, (void *)getEvals0, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getEvals0[0], edgevals0[edges[0]]); ASSERT_EQ(getEvals0[1], edgevals0[edges[1]]); status = nvgraphGetEdgeData(nvgraph_handle, temp_graph1, (void *)getEvals1, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(getEvals1[0], edgevals1[edges[0]]); ASSERT_EQ(getEvals1[1], edgevals1[edges[1]]);*/ status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphCAPITests_SubgraphCSR, CSRSubgraphEdges_CornerCases) { nvgraphStatus_t status; nvgraphGraphDescr_t temp_graph1 = NULL, temp_graph2 = NULL; // expected failures { int edges[2] = { 1, 3 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // bad library nvgraph_handle status = nvgraphExtractSubgraphByEdge(NULL, initial_graph, temp_graph2, edges, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // bad descriptor 1 status = nvgraphExtractSubgraphByEdge(nvgraph_handle, temp_graph2, temp_graph1, edges, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // bad descriptor 2 status = nvgraphExtractSubgraphByEdge(nvgraph_handle, initial_graph, temp_graph2, edges, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // NULL pointer status = nvgraphExtractSubgraphByEdge( nvgraph_handle, initial_graph, temp_graph1, (int*) NULL, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // extract zero edges - failure expected status = nvgraphExtractSubgraphByEdge(nvgraph_handle, initial_graph, temp_graph1, edges, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // bad edge number - in the C API we ask array consist of 
existing col_indices int bad_edge[1] = { -10 }; status = nvgraphExtractSubgraphByEdge( nvgraph_handle, initial_graph, temp_graph1, bad_edge, 1); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // more edges than exists in the graph - in the C API we ask array consist of existing col_indices int too_many_edges[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; status = nvgraphExtractSubgraphByEdge( nvgraph_handle, initial_graph, temp_graph1, too_many_edges, 10); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // not connected edges, which should create not connected graph { int edges[2] = { 0, 8 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphExtractSubgraphByEdge(nvgraph_handle, initial_graph, temp_graph1, edges, 2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; tData.source_offsets = NULL; tData.destination_indices = NULL; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph1, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // we extracting 2 edges: one between two vertices and another is from third vertex to itself ASSERT_EQ(tData.nvertices, 3); ASSERT_EQ(tData.nedges, 2); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // triangle. { int edges[2] = { 0, 2 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphExtractSubgraphByEdge(nvgraph_handle, initial_graph, temp_graph1, edges, 2); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; tData.source_offsets = NULL; tData.destination_indices = NULL; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph1, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // we extracting 2 edges, expecting new graph have 3 vertices and only 2 edges ASSERT_EQ(tData.nvertices, 3); ASSERT_EQ(tData.nedges, 2); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } // extract by edge to the self { int edges[1] = { 8 }; status = nvgraphCreateGraphDescr(nvgraph_handle, &temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphExtractSubgraphByEdge(nvgraph_handle, initial_graph, temp_graph1, edges, 1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st tData; tData.source_offsets = NULL; tData.destination_indices = NULL; status = nvgraphGetGraphStructure(nvgraph_handle, temp_graph1, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // we extracting 1 edge to the vertex itself, expecting new graph have only 1 vertex and 1 edge ASSERT_EQ(tData.nvertices, 1); ASSERT_EQ(tData.nedges, 1); status = nvgraphDestroyGraphDescr(nvgraph_handle, temp_graph1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } TEST_F(NvgraphCAPITests_SubgraphCSR, CSRContractionNetworkX) { nvgraphStatus_t status; try { nvgraphGraphDescr_t netx_graph = NULL; nvgraphGraphDescr_t extracted_graph = NULL; status = nvgraphCreateGraphDescr(nvgraph_handle, &netx_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphCreateGraphDescr(nvgraph_handle, &extracted_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); std::string fname(convert_to_local_path("graphs/networkx/extr_test.dat")); std::vector<int> g_row_offsets; std::vector<int> g_col_indices; std::vector<int> aggregates; std::vector<int> 
cg_row_offsets; std::vector<int> cg_col_indices; fill_extraction_data(fname, g_row_offsets, g_col_indices, aggregates, cg_row_offsets, cg_col_indices); //std::cout<<"********* step 1: \n"; ASSERT_EQ(g_row_offsets.empty(), false); ASSERT_EQ(g_col_indices.empty(), false); ASSERT_EQ(aggregates.empty(), false); ASSERT_EQ(cg_row_offsets.empty(), false); ASSERT_EQ(cg_col_indices.empty(), false); //std::cout<<"********* step 1.1: \n"; ASSERT_EQ(g_col_indices.size(), g_row_offsets.back()); ASSERT_EQ(cg_col_indices.size(), cg_row_offsets.back()); //std::cout<<"********* step 1.2: \n"; nvgraphCSRTopology32I_st topoData; topoData.nvertices = g_row_offsets.size() - 1; //last is nnz topoData.nedges = g_col_indices.size(); //std::cout<<"(n,m):"<<topoData.nvertices // <<", "<<topoData.nedges<<std::endl; topoData.source_offsets = &g_row_offsets[0]; topoData.destination_indices = &g_col_indices[0]; //std::cout<<"********* step 1.3: \n"; status = nvgraphSetGraphStructure(nvgraph_handle, netx_graph, (void*) &topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 2: \n"; size_t numsets = 1; std::vector<float> vdata(topoData.nvertices, 1.); void* vptr[] = { (void*) &vdata[0] }; cudaDataType_t type_v[] = { CUDA_R_32F }; std::vector<float> edata(topoData.nedges, 1.); void* eptr[] = { (void*) &edata[0] }; cudaDataType_t type_e[] = { CUDA_R_32F }; status = nvgraphAllocateVertexData(nvgraph_handle, netx_graph, numsets, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 3: \n"; status = nvgraphSetVertexData(nvgraph_handle, netx_graph, (void *) vptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 4: \n"; status = nvgraphAllocateEdgeData(nvgraph_handle, netx_graph, numsets, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 5: \n"; status = nvgraphSetEdgeData(nvgraph_handle, netx_graph, (void *) eptr[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 6: \n"; status = nvgraphExtractSubgraphByVertex(nvgraph_handle, netx_graph, extracted_graph, &aggregates[0], aggregates.size()); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 7: \n"; nvgraphCSRTopology32I_st tData; tData.source_offsets = NULL; tData.destination_indices = NULL; //1st time to get nvertices and nedges // status = nvgraphGetGraphStructure(nvgraph_handle, extracted_graph, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout<<"********* step 8: \n"; int cgnv = cg_row_offsets.size() - 1; int cgne = cg_col_indices.size(); ASSERT_EQ(tData.nvertices, cgnv); ASSERT_EQ(tData.nedges, cgne); //std::cout<<"********* step 9: \n"; std::vector<int> cgro(cgnv + 1, 0); std::vector<int> cgci(cgne, 0); tData.source_offsets = &cgro[0]; tData.destination_indices = &cgci[0]; //2nd time to get row_offsets and column_indices // status = nvgraphGetGraphStructure(nvgraph_handle, extracted_graph, (void*) &tData, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //std::cout << "cg row_offsets:\n"; //std::copy(cgro.begin(), cgro.end(), // std::ostream_iterator<int>(std::cout,"\n")); //std::cout << "cg col_indices:\n"; //std::copy(cgci.begin(), cgci.end(), // std::ostream_iterator<int>(std::cout,"\n")); //PROBLEM: might differ due to different vertex numbering // ///ASSERT_EQ(check_diffs(cg_row_offsets, cgro), false); ///ASSERT_EQ(check_diffs(cg_col_indices, cgci), false); //this is one invariant we can check, besides vector sizes: // 
ASSERT_EQ(check_delta_invariant(cg_row_offsets, cgro), true); //std::cout<<"********* step 10: \n"; status = nvgraphDestroyGraphDescr(nvgraph_handle, extracted_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(nvgraph_handle, netx_graph); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } catch (const file_read_error& ex) { std::cout << "Exception: " << ex.what() << ", waiving the test\n"; const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." << test_info->name() << std::endl; return; } catch (const std::exception& ex) { // dump exception: ASSERT_TRUE(false)<< "Exception: " << ex.what(); } catch(...) { ASSERT_TRUE(false) << "Exception: Unknown"; } } int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--ref-data-dir") == 0) ref_data_prefix = std::string(argv[i + 1]); if (strcmp(argv[i], "--graph-data-dir") == 0) graph_data_prefix = std::string(argv[i + 1]); } return RUN_ALL_TESTS(); }
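// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original test file): a host-side
// reference for the vertex-induced subgraph extraction exercised above. It
// assumes the selected vertices are renumbered 0..k-1 in the order they
// appear in the input list (the tests deliberately do not rely on a
// particular ordering). The helper name extract_subgraph_by_vertex_host is
// hypothetical.
static void extract_subgraph_by_vertex_host(const std::vector<int>& rowPtr,
                                            const std::vector<int>& colInd,
                                            const std::vector<int>& vertices,
                                            std::vector<int>& subRowPtr,
                                            std::vector<int>& subColInd)
{
    // Map original vertex ids to local ids (-1 means "not selected").
    int n = static_cast<int>(rowPtr.size()) - 1;
    std::vector<int> local(n, -1);
    for (size_t i = 0; i < vertices.size(); ++i)
        local[vertices[i]] = static_cast<int>(i);
    subRowPtr.assign(vertices.size() + 1, 0);
    subColInd.clear();
    // Keep only edges whose endpoints are both in the selected set.
    for (size_t i = 0; i < vertices.size(); ++i) {
        int v = vertices[i];
        for (int e = rowPtr[v]; e < rowPtr[v + 1]; ++e)
            if (local[colInd[e]] >= 0)
                subColInd.push_back(local[colInd[e]]);
        subRowPtr[i + 1] = static_cast<int>(subColInd.size());
    }
}
// For the Bug60 fixture (edges 0->2 and 1->3) and the vertex list {0, 1, 3},
// this yields offsets {0, 0, 1, 1} and a single destination index 2 (the
// local id of original vertex 3), matching the assertions in that test.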
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/convert_preset_testcases.h
#include <nvgraph.h>

//
// Simple Conversion Matrices (1)
//
//-------------------------------------------------------------------------------------
//  Matrix A
//  0.0  0.0  0.2  0.0  1.0
//  0.3  0.7  0.0  1.2  0.0
//  0.0  0.0  0.0  0.0  0.0
//  0.0  0.0  8.6  0.0  0.0
//  0.0  0.0  0.0  0.0  0.986410984960948401569841
//
//  n = 5;
//  m = 5;
//  nnz = 7;
//  csrVal    = {0.2, 1.0, 0.3, 0.7, 1.2, 8.6, 0.986410984960948401569841};
//  csrColInd = {2, 4, 0, 1, 3, 2, 4};
//  csrRowPtr = {0, 2, 5, 5, 6, 7};
//
//  cscVal    = {0.3, 0.7, 0.2, 8.6, 1.2, 1.0, 0.986410984960948401569841};
//  cscRowInc = {1, 1, 0, 3, 1, 0, 4};
//  cscColPtr = {0, 1, 2, 4, 5, 7};
//
//  COOSourceVal    = {0.2, 1.0, 0.3, 0.7, 1.2, 8.6, 0.986410984960948401569841};
//  COOSourceRowInc = {0, 0, 1, 1, 1, 3, 4};
//  COOSourceColInc = {2, 4, 0, 1, 3, 2, 4};
//
//  COODestVal    = {0.3, 0.7, 0.2, 8.6, 1.2, 1.0, 0.986410984960948401569841};
//  COODestRowInc = {1, 1, 0, 3, 1, 0, 4};
//  COODestColInc = {0, 1, 2, 2, 3, 4, 4};
//-------------------------------------------------------------------------------------

#define SIMPLE_TEST_1_N   5
#define SIMPLE_TEST_1_NNZ 7

int SIMPLE_CSR_SOURCE_OFFSETS[SIMPLE_TEST_1_N+1]      = {0, 2, 5, 5, 6, 7};    // rowPtr
int SIMPLE_CSR_DESTINATION_INDICES[SIMPLE_TEST_1_NNZ] = {2, 4, 0, 1, 3, 2, 4}; // colInd

int SIMPLE_CSC_SOURCE_INDICES[SIMPLE_TEST_1_NNZ]      = {1, 1, 0, 3, 1, 0, 4}; // rowInc
int SIMPLE_CSC_DESTINATION_OFFSETS[SIMPLE_TEST_1_N+1] = {0, 1, 2, 4, 5, 7};    // colPtr

int SIMPLE_COOS_SOURCE_INDICES[SIMPLE_TEST_1_NNZ]      = {0, 0, 1, 1, 1, 3, 4}; // row
int SIMPLE_COOS_DESTINATION_INDICES[SIMPLE_TEST_1_NNZ] = {2, 4, 0, 1, 3, 2, 4}; // col

int SIMPLE_COOD_SOURCE_INDICES[SIMPLE_TEST_1_NNZ]      = {1, 1, 0, 3, 1, 0, 4}; // row
int SIMPLE_COOD_DESTINATION_INDICES[SIMPLE_TEST_1_NNZ] = {0, 1, 2, 2, 3, 4, 4}; // col

int SIMPLE_COOU_SOURCE_INDICES[SIMPLE_TEST_1_NNZ]      = {4, 1, 0, 3, 0, 1, 1}; // row
int SIMPLE_COOU_DESTINATION_INDICES[SIMPLE_TEST_1_NNZ] = {4, 1, 2, 2, 4, 3, 0}; // col

const double SIMPLE_CSR_EDGE_DATA[SIMPLE_TEST_1_NNZ]  = {0.2, 1.0, 0.3, 0.7, 1.2, 8.6, 0.986410984960948401569841};
const double SIMPLE_CSC_EDGE_DATA[SIMPLE_TEST_1_NNZ]  = {0.3, 0.7, 0.2, 8.6, 1.2, 1.0, 0.986410984960948401569841};
const double SIMPLE_COOS_EDGE_DATA[SIMPLE_TEST_1_NNZ] = {0.2, 1.0, 0.3, 0.7, 1.2, 8.6, 0.986410984960948401569841};
const double SIMPLE_COOD_EDGE_DATA[SIMPLE_TEST_1_NNZ] = {0.3, 0.7, 0.2, 8.6, 1.2, 1.0, 0.986410984960948401569841};
const double SIMPLE_COOU_EDGE_DATA[SIMPLE_TEST_1_NNZ] = {0.986410984960948401569841, 0.7, 0.2, 8.6, 1.0, 1.2, 0.3};

nvgraphCSRTopology32I_st simpleCsrTopo = { SIMPLE_TEST_1_N, SIMPLE_TEST_1_NNZ,
                                           SIMPLE_CSR_SOURCE_OFFSETS, SIMPLE_CSR_DESTINATION_INDICES };
nvgraphCSCTopology32I_st simpleCscTopo = { SIMPLE_TEST_1_N, SIMPLE_TEST_1_NNZ,
                                           SIMPLE_CSC_DESTINATION_OFFSETS, SIMPLE_CSC_SOURCE_INDICES };
nvgraphCOOTopology32I_st simpleCooSourceTopo   = { SIMPLE_TEST_1_N, SIMPLE_TEST_1_NNZ,
                                                   SIMPLE_COOS_SOURCE_INDICES, SIMPLE_COOS_DESTINATION_INDICES,
                                                   NVGRAPH_SORTED_BY_SOURCE };
nvgraphCOOTopology32I_st simpleCooDestTopo     = { SIMPLE_TEST_1_N, SIMPLE_TEST_1_NNZ,
                                                   SIMPLE_COOD_SOURCE_INDICES, SIMPLE_COOD_DESTINATION_INDICES,
                                                   NVGRAPH_SORTED_BY_DESTINATION };
nvgraphCOOTopology32I_st simpleCooUnsortedTopo = { SIMPLE_TEST_1_N, SIMPLE_TEST_1_NNZ,
                                                   SIMPLE_COOU_SOURCE_INDICES, SIMPLE_COOU_DESTINATION_INDICES,
                                                   NVGRAPH_UNSORTED };

//-------------------------------------------------------------------------------------
struct presetTestContainer_st {
    nvgraphCSRTopology32I_st* csrTopo;
    nvgraphCSCTopology32I_st* cscTopo;
    nvgraphCOOTopology32I_st* coosTopo; // source
    nvgraphCOOTopology32I_st* coodTopo; // dest
    nvgraphCOOTopology32I_st* coouTopo; // unsorted
    const void* csrEdgeData;
    const void* cscEdgeData;
    const void* coosEdgeData;
    const void* coodEdgeData;
    const void* coouEdgeData;
};
typedef struct presetTestContainer_st *presetTestContainer_t;

// Hold all test data in one container
presetTestContainer_st simpleTest1 = { &simpleCsrTopo, &simpleCscTopo, &simpleCooSourceTopo,
                                       &simpleCooDestTopo, &simpleCooUnsortedTopo,
                                       SIMPLE_CSR_EDGE_DATA, SIMPLE_CSC_EDGE_DATA,
                                       SIMPLE_COOS_EDGE_DATA, SIMPLE_COOD_EDGE_DATA,
                                       SIMPLE_COOU_EDGE_DATA };

//-------------------------------------------------------------------------------------
// Add your preset tests here
presetTestContainer_st presetTests[] = {simpleTest1};
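// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): expanding the CSR
// row offsets above into per-edge source indices reproduces
// SIMPLE_COOS_SOURCE_INDICES, which is the relationship between the CSR and
// COO-by-source presets that the conversion tests rely on. The helper name
// expand_offsets_to_sources is hypothetical.
static inline void expand_offsets_to_sources(const int* rowPtr, int n, int* srcOut)
{
    // Row i owns the edge slots [rowPtr[i], rowPtr[i+1]).
    for (int i = 0; i < n; ++i)
        for (int k = rowPtr[i]; k < rowPtr[i + 1]; ++k)
            srcOut[k] = i;
}
// e.g. expand_offsets_to_sources(SIMPLE_CSR_SOURCE_OFFSETS, SIMPLE_TEST_1_N, out)
// fills out with {0, 0, 1, 1, 1, 3, 4}.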
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_capi_tests_clustering.cpp
#include <utility> #include "gtest/gtest.h" #include "nvgraph_test_common.h" #include "valued_csr_graph.hxx" #include "readMatrix.hxx" #include "nvgraphP.h" #include "nvgraph.h" #include "nvgraph_experimental.h" #include "stdlib.h" #include <algorithm> extern "C" { #include "mmio.h" } #include "mm.hxx" // do the perf measurements, enabled by command line parameter '--perf' static int PERF = 0; // minimum vertices in the graph to perform perf measurements #define PERF_ROWS_LIMIT 1000 // number of repeats = multiplier/num_vertices #define PARTITIONER_ITER_MULTIPLIER 1 #define SELECTOR_ITER_MULTIPLIER 1 // iterations for stress tests = this multiplier * iterations for perf tests static int STRESS_MULTIPLIER = 10; static std::string ref_data_prefix = ""; static std::string graph_data_prefix = ""; // utility template <typename T> struct nvgraph_Const; template <> struct nvgraph_Const<double> { static const cudaDataType_t Type = CUDA_R_64F; static const double inf; static const double tol; typedef union fpint { double f; unsigned long u; } fpint_st; }; const double nvgraph_Const<double>::inf = DBL_MAX; const double nvgraph_Const<double>::tol = 1e-6; // this is what we use as a tolerance in the algorithms, more precision than this is useless for CPU reference comparison template <> struct nvgraph_Const<float> { static const cudaDataType_t Type = CUDA_R_32F; static const float inf; static const float tol; typedef union fpint { float f; unsigned u; } fpint_st; }; const float nvgraph_Const<float>::inf = FLT_MAX; const float nvgraph_Const<float>::tol = 1e-4; template <typename T> bool enough_device_memory(int n, int nnz, size_t add) { size_t mtotal, mfree; cudaMemGetInfo(&mfree, &mtotal); if (mfree > add + sizeof(T)*3*(n + nnz)) return true; return false; } std::string convert_to_local_path(const std::string& in_file) { std::string wstr = in_file; if ((wstr != "dummy") & (wstr != "")) { std::string prefix; if (graph_data_prefix.length() > 0) { prefix = graph_data_prefix; } else { #ifdef _WIN32 //prefix = "C:\\mnt\\eris\\test\\matrices_collection\\"; prefix = "Z:\\matrices_collection\\"; std::replace(wstr.begin(), wstr.end(), '/', '\\'); #else prefix = "/mnt/nvgraph_test_data/"; #endif } wstr = prefix + wstr; } return wstr; } std::string convert_to_local_path_refdata(const std::string& in_file) { std::string wstr = in_file; if ((wstr != "dummy") & (wstr != "")) { std::string prefix; if (ref_data_prefix.length() > 0) { prefix = ref_data_prefix; } else { #ifdef _WIN32 //prefix = "C:\\mnt\\eris\\test\\ref_data\\"; prefix = "Z:\\ref_data\\"; std::replace(wstr.begin(), wstr.end(), '/', '\\'); #else prefix = "/mnt/nvgraph_test_data/ref_data/"; #endif } wstr = prefix + wstr; } return wstr; } /**************************** * SPECTRAL CLUSTERING *****************************/ typedef struct SpectralClustering_Usecase_t { std::string graph_file; int clusters; int eigenvalues; nvgraphSpectralClusteringType_t algorithm; nvgraphClusteringMetric_t metric; SpectralClustering_Usecase_t(const std::string& a, int b, int c, nvgraphSpectralClusteringType_t d, nvgraphClusteringMetric_t e) : clusters(b), eigenvalues(c), algorithm(d), metric(e){ graph_file = convert_to_local_path(a);}; SpectralClustering_Usecase_t& operator=(const SpectralClustering_Usecase_t& rhs) { graph_file = rhs.graph_file; clusters = rhs.clusters; eigenvalues = rhs.eigenvalues; algorithm = rhs.algorithm; metric = rhs.metric; return *this; } } SpectralClustering_Usecase; class NVGraphCAPITests_SpectralClustering : public 
::testing::TestWithParam<SpectralClustering_Usecase> { public: NVGraphCAPITests_SpectralClustering() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const SpectralClustering_Usecase& param) { const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.clusters; ss << param.eigenvalues; ss << param.algorithm; ss << param.metric; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file) + std::string("_") + ss.str().c_str(); nvgraphStatus_t status; int m, n, nnz; MM_typecode mc; FILE* fpin = fopen(param.graph_file.c_str(),"r"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; ASSERT_EQ(mm_properties<int>(fpin, 1, &mc, &m, &n, &nnz),0) << "could not read Matrix Market file properties"<< "\n"; ASSERT_TRUE(mm_is_matrix(mc)); ASSERT_TRUE(mm_is_coordinate(mc)); ASSERT_TRUE(m==n); ASSERT_FALSE(mm_is_complex(mc)); ASSERT_FALSE(mm_is_skew(mc)); // Allocate memory on host std::vector<int> cooRowIndA(nnz); std::vector<int> csrColIndA(nnz); std::vector<int> csrRowPtrA(n+1); std::vector<T> csrValA(nnz); ASSERT_EQ( (mm_to_coo<int,T>(fpin, 1, nnz, &cooRowIndA[0], &csrColIndA[0], &csrValA[0], NULL)) , 0)<< "could not read matrix data"<< "\n"; ASSERT_EQ( (coo_to_csr<int,T> (n, n, nnz, &cooRowIndA[0], &csrColIndA[0], &csrValA[0], NULL, &csrRowPtrA[0], NULL, NULL, NULL)), 0) << "could not covert COO to CSR "<< "\n"; ASSERT_EQ(fclose(fpin),0); //ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; int *clustering_d; if (!enough_device_memory<T>(n, nnz, sizeof(int)*(csrRowPtrA.size() + csrColIndA.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." 
<< test_info->name() << std::endl; return; } cudaMalloc((void**)&clustering_d , n*sizeof(int)); nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSRTopology32I_st topology = {n, nnz, &csrRowPtrA[0], &csrColIndA[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSR_32); // set up graph data size_t numsets = 1; void* edgeptr[1] = {(void*)&csrValA[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; status = nvgraphAllocateEdgeData(handle, g1, numsets, type_e ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int weight_index = 0; struct SpectralClusteringParameter clustering_params; clustering_params.n_clusters = param.clusters; clustering_params.n_eig_vects = param.eigenvalues; clustering_params.algorithm = param.algorithm; clustering_params.evs_tolerance = 0.0f ; clustering_params.evs_max_iter = 0; clustering_params.kmean_tolerance = 0.0f; clustering_params.kmean_max_iter = 0; std::vector<int> random_assignments_h(n); std::vector<T> eigVals_h(param.eigenvalues); std::vector<T> eigVecs_h(n*param.eigenvalues); float score = 0.0, random_score = 0.0; if (PERF && n > PERF_ROWS_LIMIT) { double start, stop; start = second(); int repeat = std::max((int)((float)(PARTITIONER_ITER_MULTIPLIER)/n), 1); for (int i = 0; i < repeat; i++) status =nvgraphSpectralClustering(handle, g1, weight_index, &clustering_params, clustering_d, &eigVals_h[0], &eigVecs_h[0]); stop = second(); printf("&&&& PERF Time_%s %10.8f -ms\n", test_id.c_str(), 1000.0*(stop-start)/repeat); } else status =nvgraphSpectralClustering(handle, g1, weight_index, &clustering_params, clustering_d, &eigVals_h[0], &eigVecs_h[0]); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // Analyse quality status = nvgraphAnalyzeClustering(handle, g1, weight_index, param.clusters, clustering_d, param.metric, &score); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //printf("Score = %f\n", score); // === // Synthetic random for (int i=0; i<n; i++) { random_assignments_h[i] = rand() % param.clusters; //printf("%d ", random_assignments_h[i]); } status = nvgraphAnalyzeClustering(handle, g1, weight_index, param.clusters, &random_assignments_h[0], param.metric, &random_score); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //printf("Random modularity = %f\n", modularity2); if (param.metric == NVGRAPH_MODULARITY) EXPECT_GE(score, random_score); // we want higher modularity else EXPECT_GE(random_score, score); //we want less edge cut cudaFree(clustering_d); status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_SpectralClustering, CheckResultDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_SpectralClustering, CheckResultFloat) { run_current_test<float>(GetParam()); } // --gtest_filter=*ModularityCorrectness* INSTANTIATE_TEST_CASE_P(SpectralModularityCorrectnessCheck, NVGraphCAPITests_SpectralClustering, // graph FILE number of clusters # number of eigenvalues # ::testing::Values( SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 2, 2, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 3, 3, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/uk.mtx", 2, 2, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/uk.mtx", 3, 3, 
NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/data.mtx", 3, 3, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/data.mtx", 5, 5, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/data.mtx", 7, 7, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/cti.mtx", 3, 3,NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/cti.mtx", 5, 5,NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/cti.mtx", 7, 7,NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY) ///// more instances ) ); // --gtest_filter=*ModularityCorner* INSTANTIATE_TEST_CASE_P(SpectralModularityCornerCheck, NVGraphCAPITests_SpectralClustering, // graph FILE number of clusters # number of eigenvalues # ::testing::Values( SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/uk.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/delaunay_n12.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/data.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/cti.mtx", 7, 4,NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 7, 4, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 17, 7, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY) // tests cases on coAuthorsDBLP may diverge on some cards (likely due to different normalization operation) //SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 7, 4,NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), //SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 17, 7,NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY) ///// more instances ) ); // --gtest_filter=*LanczosBlancedCutCorrectness* INSTANTIATE_TEST_CASE_P(SpectralLanczosBlancedCutCorrectnessCheck, NVGraphCAPITests_SpectralClustering, // graph FILE number of clusters # number of eigenvalues # ::testing::Values( SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 2, 2,NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 3, 3, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 4, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/uk.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n12.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/data.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/cti.mtx", 3, 3, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/cti.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT) ///// more instances ) ); // --gtest_filter=*LanczosBlancedCutCorner* INSTANTIATE_TEST_CASE_P(SpectralLanczosBlancedCutCornerCheck, NVGraphCAPITests_SpectralClustering, // graph FILE number of clusters # number of eigenvalues # ::testing::Values( 
SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/uk.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n12.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/data.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/cti.mtx", 7, 4,NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 17, 7, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT) // tests cases on coAuthorsDBLP may diverge on some cards (likely due to different normalization operation) //SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 7, 4,NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT), //SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 17, 7,NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_EDGE_CUT) ) ); // --gtest_filter=*LobpcgBlancedCutCorrectness* INSTANTIATE_TEST_CASE_P(SpectralLobpcgBlancedCutCorrectnessCheck, NVGraphCAPITests_SpectralClustering, // graph FILE number of clusters # number of eigenvalues # ::testing::Values( SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 2, 2,NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 3, 3, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 4, 4, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/uk.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n12.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/cti.mtx", 3, 3, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), SpectralClustering_Usecase("dimacs10/cti.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT) ///// more instances ) ); // --gtest_filter=*LobpcgBlancedCutCorner* INSTANTIATE_TEST_CASE_P(SpectralLobpcgBlancedCutCornerCheck, NVGraphCAPITests_SpectralClustering, // graph FILE number of clusters # number of eigenvalues # ::testing::Values( SpectralClustering_Usecase("dimacs10/delaunay_n10.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/uk.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/delaunay_n12.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/data.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/cti.mtx", 7, 4,NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 7, 4, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT), SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 17, 7, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT) // tests cases on coAuthorsDBLP may diverge on some cards (likely due to different normalization operation) //SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 7, 4,NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT), //SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 17, 7,NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_EDGE_CUT) ///// more instances ) ); //Followinf tests 
were commented becasue they are a bit redundent and quite long to run // previous tests already contain dataset with 1 million edges //// --gtest_filter=*ModularityLargeCorrectness* //INSTANTIATE_TEST_CASE_P(SpectralModularityLargeCorrectnessCheck, // NVGraphCAPITests_SpectralClustering, // // graph FILE number of clusters # number of eigenvalues # // ::testing::Values( // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 2, 2, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 3, 3, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 5, 5, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 7, 7, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 2, 2, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 3, 3, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 5, 5, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 7, 7, NVGRAPH_MODULARITY_MAXIMIZATION, NVGRAPH_MODULARITY) // ///// more instances // ) // ); // //// --gtest_filter=*LanczosBlancedCutLargeCorrectness* //INSTANTIATE_TEST_CASE_P(SpectralLanczosBlancedCutLargeCorrectnessCheck, // NVGraphCAPITests_SpectralClustering, // // graph FILE number of clusters # number of eigenvalues # // ::testing::Values( // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 2, 2, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 3, 3, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 5, 5, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LANCZOS, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 2, 2, NVGRAPH_BALANCED_CUT_LANCZOS,NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 3, 3, NVGRAPH_BALANCED_CUT_LANCZOS,NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 5, 5, NVGRAPH_BALANCED_CUT_LANCZOS,NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LANCZOS,NVGRAPH_RATIO_CUT) // ) // ); //// --gtest_filter=*LobpcgBlancedCutLargeCorrectness* //INSTANTIATE_TEST_CASE_P(SpectralLobpcgBlancedCutLargeCorrectnessCheck, // NVGraphCAPITests_SpectralClustering, // // graph FILE number of clusters # number of eigenvalues # // ::testing::Values( // //SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 2, 2, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 3, 3, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 5, 5, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/citationCiteseer.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 2, 2, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 3, 3, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), // 
SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 5, 5, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT), // SpectralClustering_Usecase("dimacs10/coAuthorsDBLP.mtx", 7, 7, NVGRAPH_BALANCED_CUT_LOBPCG, NVGRAPH_RATIO_CUT) // ) // ); /**************************** * SELECTOR *****************************/ typedef struct Selector_Usecase_t { std::string graph_file; nvgraphEdgeWeightMatching_t metric; Selector_Usecase_t(const std::string& a, nvgraphEdgeWeightMatching_t b) : metric(b){ graph_file = convert_to_local_path(a);}; Selector_Usecase_t& operator=(const Selector_Usecase_t& rhs) { graph_file = rhs.graph_file; metric = rhs.metric; return *this; } }Selector_Usecase; class NVGraphCAPITests_Selector : public ::testing::TestWithParam<Selector_Usecase> { public: NVGraphCAPITests_Selector() : handle(NULL) {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; template <typename T> void run_current_test(const Selector_Usecase& param) { const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; ss << param.metric; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + getFileName(param.graph_file)+ std::string("_") + ss.str().c_str(); nvgraphStatus_t status; int m, n, nnz; MM_typecode mc; FILE* fpin = fopen(param.graph_file.c_str(),"r"); ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; ASSERT_EQ(mm_properties<int>(fpin, 1, &mc, &m, &n, &nnz),0) << "could not read Matrix Market file properties"<< "\n"; ASSERT_TRUE(mm_is_matrix(mc)); ASSERT_TRUE(mm_is_coordinate(mc)); ASSERT_TRUE(m==n); ASSERT_FALSE(mm_is_complex(mc)); ASSERT_FALSE(mm_is_skew(mc)); // Allocate memory on host std::vector<int> cooRowIndA(nnz); std::vector<int> csrColIndA(nnz); std::vector<int> csrRowPtrA(n+1); std::vector<T> csrValA(nnz); ASSERT_EQ( (mm_to_coo<int,T>(fpin, 1, nnz, &cooRowIndA[0], &csrColIndA[0], &csrValA[0], NULL)) , 0)<< "could not read matrix data"<< "\n"; ASSERT_EQ( (coo_to_csr<int,T> (n, n, nnz, &cooRowIndA[0], &csrColIndA[0], &csrValA[0], NULL, &csrRowPtrA[0], NULL, NULL, NULL)), 0) << "could not covert COO to CSR "<< "\n"; ASSERT_EQ(fclose(fpin),0); //ASSERT_TRUE(fpin != NULL) << "Cannot read input graph file: " << param.graph_file << std::endl; if (!enough_device_memory<T>(n, nnz, sizeof(int)*(csrRowPtrA.size() + csrColIndA.size()))) { std::cout << "[ WAIVED ] " << test_info->test_case_name() << "." 
<< test_info->name() << std::endl; return; } //int *aggregates_d; //cudaMalloc((void**)&aggregates_d , n*sizeof(int)); nvgraphGraphDescr_t g1 = NULL; status = nvgraphCreateGraphDescr(handle, &g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // set up graph nvgraphCSRTopology32I_st topology = {n, nnz, &csrRowPtrA[0], &csrColIndA[0]}; status = nvgraphSetGraphStructure(handle, g1, (void*)&topology, NVGRAPH_CSR_32); // set up graph data size_t numsets = 1; //void* vertexptr[1] = {(void*)&calculated_res[0]}; //cudaDataType_t type_v[1] = {nvgraph_Const<T>::Type}; void* edgeptr[1] = {(void*)&csrValA[0]}; cudaDataType_t type_e[1] = {nvgraph_Const<T>::Type}; //status = nvgraphAllocateVertexData(handle, g1, numsets, type_v); //ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); //status = nvgraphSetVertexData(handle, g1, vertexptr[0], 0, NVGRAPH_CSR_32 ); //ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphAllocateEdgeData(handle, g1, numsets, type_e ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, g1, (void *)edgeptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); int weight_index = 0; std::vector<int> aggregates_h(n); //std::vector<int> aggregates_global_h(n); size_t num_aggregates; size_t *num_aggregates_ptr = &num_aggregates; status = nvgraphHeavyEdgeMatching(handle, g1, weight_index, param.metric, &aggregates_h[0], num_aggregates_ptr); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); std::cout << "n = " << n << ", num aggregates = " << num_aggregates << std::endl; if (param.metric == NVGRAPH_SCALED_BY_DIAGONAL) EXPECT_EQ(num_aggregates, static_cast<size_t>(166)); // comparing against amgx result on poisson2D.mtx else EXPECT_LE(num_aggregates, static_cast<size_t>(n)); // just make sure the output make sense //for (int i=0; i<n; i++) //{ // printf("%d\n", aggregates_h[i]); //} status = nvgraphDestroyGraphDescr(handle, g1); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } }; TEST_P(NVGraphCAPITests_Selector, CheckResultDouble) { run_current_test<double>(GetParam()); } TEST_P(NVGraphCAPITests_Selector, CheckResultFloat) { run_current_test<float>(GetParam()); } // --gtest_filter=*Correctness* INSTANTIATE_TEST_CASE_P(SmallCorrectnessCheck, NVGraphCAPITests_Selector, // graph FILE SIMILARITY_METRIC ::testing::Values( Selector_Usecase("Florida/poisson2D.mtx", NVGRAPH_SCALED_BY_DIAGONAL), Selector_Usecase("dimacs10/delaunay_n10.mtx", NVGRAPH_SCALED_BY_ROW_SUM), Selector_Usecase("dimacs10/delaunay_n10.mtx", NVGRAPH_UNSCALED), Selector_Usecase("dimacs10/uk.mtx", NVGRAPH_SCALED_BY_ROW_SUM), Selector_Usecase("dimacs10/uk.mtx", NVGRAPH_UNSCALED), Selector_Usecase("dimacs10/data.mtx", NVGRAPH_SCALED_BY_ROW_SUM), Selector_Usecase("dimacs10/data.mtx", NVGRAPH_UNSCALED), Selector_Usecase("dimacs10/cti.mtx", NVGRAPH_SCALED_BY_ROW_SUM), Selector_Usecase("dimacs10/cti.mtx", NVGRAPH_UNSCALED) ///// more instances ) ); int main(int argc, char **argv) { srand(42); ::testing::InitGoogleTest(&argc, argv); for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--perf") == 0) PERF = 1; if (strcmp(argv[i], "--stress-iters") == 0) STRESS_MULTIPLIER = atoi(argv[i+1]); if (strcmp(argv[i], "--ref-data-dir") == 0) ref_data_prefix = std::string(argv[i+1]); if (strcmp(argv[i], "--graph-data-dir") == 0) graph_data_prefix = std::string(argv[i+1]); } return RUN_ALL_TESTS(); return 0; }
0
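A minimal, self-contained sketch of the heavy-edge-matching call exercised by the selector test above; it only uses the nvgraph C API calls that appear in that test, with a tiny hard-coded CSR graph standing in for the Matrix Market loader and with status checks trimmed. The 4-vertex topology and weights are illustrative placeholders, not data from the tests.

// Sketch only: mirrors the selector test flow on a small symmetric CSR graph.
#include <vector>
#include <cstdio>
#include "nvgraph.h"

int heavy_edge_matching_sketch()
{
    nvgraphHandle_t handle = NULL;
    nvgraphGraphDescr_t g = NULL;
    nvgraphCreate(&handle);
    nvgraphCreateGraphDescr(handle, &g);

    // Small CSR graph: 4 vertices, 6 directed edges (symmetric weights).
    int n = 4, nnz = 6;
    std::vector<int>   rowPtr  = {0, 2, 4, 5, 6};
    std::vector<int>   colInd  = {1, 2, 0, 3, 0, 1};
    std::vector<float> weights = {1.0f, 0.5f, 1.0f, 2.0f, 0.5f, 2.0f};

    nvgraphCSRTopology32I_st topology = {n, nnz, rowPtr.data(), colInd.data()};
    nvgraphSetGraphStructure(handle, g, (void*)&topology, NVGRAPH_CSR_32);

    // One float edge data set holding the weights, as in the test above.
    cudaDataType_t type_e[1] = {CUDA_R_32F};
    nvgraphAllocateEdgeData(handle, g, 1, type_e);
    nvgraphSetEdgeData(handle, g, (void*)weights.data(), 0);

    // Aggregate vertices by heavy edge matching.
    std::vector<int> aggregates(n);
    size_t num_aggregates = 0;
    nvgraphHeavyEdgeMatching(handle, g, 0 /*weight_index*/, NVGRAPH_UNSCALED,
                             aggregates.data(), &num_aggregates);
    printf("n = %d, num aggregates = %zu\n", n, num_aggregates);

    nvgraphDestroyGraphDescr(handle, g);
    nvgraphDestroy(handle);
    return 0;
}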
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/tests/nvgraph_test.cpp
#include "gtest/gtest.h" #include "valued_csr_graph.hxx" #include "nvgraphP.h" #include "nvgraph.h" #include <cstring> class NvgraphAPITest : public ::testing::Test { public: NvgraphAPITest() : handle(NULL) {} protected: static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() { if (handle == NULL) { status = nvgraphCreate(&handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } } virtual void TearDown() { if (handle != NULL) { status = nvgraphDestroy(handle); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); handle = NULL; } } nvgraphStatus_t status; nvgraphHandle_t handle; cudaStream_t *stream; }; nvgraphCSRTopology32I_st topoData; void createTopo() { // nvgraphStatus_t mystatus; topoData.nvertices = 4; topoData.nedges = 5; int offsets[6]; //{0,1,3,4,5,5}; offsets[0] = 0; offsets[1] = 1; offsets[2] = 3; offsets[3] = 4; offsets[4] = 5; offsets[5] = 5; topoData.source_offsets= offsets; int neighborhood[5]; neighborhood[0]=0; neighborhood[1]=2; neighborhood[2]=3; neighborhood[3]=4; neighborhood[4]=4; topoData.destination_indices = neighborhood; }; TEST_F(NvgraphAPITest,NvgraphCreateDestroy) { } TEST_F(NvgraphAPITest,NvgraphStatusGetString ) { const char *ret_status_str; nvgraphStatus_t status = NVGRAPH_STATUS_SUCCESS; ret_status_str = nvgraphStatusGetString( status); const std::string success_str = "Success"; ASSERT_EQ( ret_status_str, success_str); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailNotInit) { // nvgraphStatus_t status; //status = nvgraphDestroy( handle); const std::string not_init_str = "nvGRAPH not initialized"; const char *ret_status_str; ret_status_str = nvgraphStatusGetString(NVGRAPH_STATUS_NOT_INITIALIZED); ASSERT_EQ( ret_status_str, not_init_str); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailAllocFailed) { const char *ret_status_str; const std::string alloc_failed = "nvGRAPH alloc failed"; ret_status_str = nvgraphStatusGetString(NVGRAPH_STATUS_ALLOC_FAILED); ASSERT_EQ( ret_status_str, alloc_failed); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailInvalidValue) { const char *ret_status_str; const std::string invalid_value = "nvGRAPH invalid value"; ret_status_str = nvgraphStatusGetString(NVGRAPH_STATUS_INVALID_VALUE); ASSERT_EQ( ret_status_str, invalid_value); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailArchMismatch) { const char *ret_status_str; const std::string arch_mismatch = "nvGRAPH arch mismatch"; ret_status_str = nvgraphStatusGetString(NVGRAPH_STATUS_ARCH_MISMATCH); ASSERT_EQ( ret_status_str, arch_mismatch); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailMappingError) { const char *ret_status_str; const std::string mapping_error = "nvGRAPH mapping error"; ret_status_str = nvgraphStatusGetString(NVGRAPH_STATUS_MAPPING_ERROR); ASSERT_EQ( ret_status_str, mapping_error); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailExecFailed) { const char *ret_status_str; const std::string exec_failed = "nvGRAPH execution failed"; ret_status_str = nvgraphStatusGetString(NVGRAPH_STATUS_EXECUTION_FAILED); ASSERT_EQ( ret_status_str, exec_failed); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailInternalError) { const char *ret_status_str; const std::string internal_error = "nvGRAPH internal error"; ret_status_str = nvgraphStatusGetString(NVGRAPH_STATUS_INTERNAL_ERROR); ASSERT_EQ( ret_status_str, internal_error); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailTypeNotSupported) { const char *ret_status_str; const std::string type_not_supported = "nvGRAPH type not supported"; ret_status_str = 
nvgraphStatusGetString(NVGRAPH_STATUS_TYPE_NOT_SUPPORTED); ASSERT_EQ( ret_status_str, type_not_supported); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailGraphTypeNotSupported) { const char *ret_status_str; const std::string type_not_supported = "nvGRAPH graph type not supported"; ret_status_str = nvgraphStatusGetString(NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED); ASSERT_EQ( ret_status_str, type_not_supported); } TEST_F(NvgraphAPITest,NvgraphStatusGetStringFailUnknownNvgraphStatus) { const char *ret_status_str; const std::string unknown_nvgraph_status = "Unknown nvGRAPH Status"; ret_status_str = nvgraphStatusGetString((nvgraphStatus_t)11); ASSERT_EQ( ret_status_str, unknown_nvgraph_status); } TEST_F(NvgraphAPITest,NvgraphCreateGraphDescr) { nvgraphGraphDescr_t G=NULL; status = nvgraphCreateGraphDescr(handle, &G); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphCreateDestroyGraphDescr) { nvgraphGraphDescr_t G=NULL; status = nvgraphCreateGraphDescr(handle, &G); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, G); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphCreateDestroyGraphDescr_CornerCases) { nvgraphGraphDescr_t G = NULL; status = nvgraphDestroyGraphDescr(handle, G); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); } TEST_F(NvgraphAPITest,NvgraphGraphDescrSetCSRTopology) { nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); nvgraphCSRTopology32I_st topoData; topoData.nvertices = 0; topoData.nedges = 0; topoData.source_offsets = NULL; topoData.destination_indices = NULL; // Bad topology, missing all entries, should fail status=nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); topoData.nvertices = 4; topoData.nedges = 4; // Bad topology, missing all offsets and indices, should fail status=nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); int offsets[6]; //{0,1,3,4,5,5}; offsets[0] = 0; offsets[1] = 1; offsets[2] = 3; offsets[3] = 4; offsets[4] = 5; offsets[5] = 5; topoData.source_offsets= offsets; // Bad topology, missing destination_indices, should fail status=nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); int indices[4]; indices[0] = 1; indices[0] = 2; indices[0] = 3; indices[0] = 4; topoData.destination_indices = indices; // Should be ok now status=nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphDestroyGraphDescr(handle, descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphGraphDescrSetGetTopologyCSR) { nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // 1, 0, 0, 0, 0, 0, 0 // 0, 1, 0, 0, 0, 0, 0 // 0, 0, 0, 0, 0, 0, 0 // 1, 0, 0, 0, 0, 0, 1 // 1, 1, 1, 0, 0, 0, 0 // 0, 0, 0, 0, 0, 0, 0 // 1, 1, 1, 0, 0, 0, 1 // indptr=[0 1 2 2 4 7 7 11] // 8 // indices=[0 1 0 6 0 1 2 0 1 2 6] // 11 // n=7 // nnz=11 int rowPtr[] = {0, 1, 2, 2, 4, 7, 7, 11}; int colInd[] = {0, 1, 0, 6, 0, 1, 2, 0, 1, 2, 6}; nvgraphCSRTopology32I_st topoData; topoData.nedges = 11; // nnz topoData.nvertices = 7; // n topoData.source_offsets = rowPtr; topoData.destination_indices = colInd; status=nvgraphSetGraphStructure(handle, descrG, (void 
*)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status=nvgraphGetGraphStructure(handle, descrG, NULL, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // Check TType return value nvgraphTopologyType_t TType; status=nvgraphGetGraphStructure(handle, descrG, NULL, &TType); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(NVGRAPH_CSR_32, TType); // Check topoGet nedges and nvertices nvgraphCSRTopology32I_st topoDataGet; topoDataGet.nvertices=0; topoDataGet.nedges=0; topoDataGet.source_offsets=NULL; topoDataGet.destination_indices=NULL; status=nvgraphGetGraphStructure(handle, descrG, (void *)&topoDataGet, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(topoData.nvertices, topoDataGet.nvertices); ASSERT_EQ(topoData.nedges, topoDataGet.nedges); // Check topoGet nedges, nvertices and offsets topoDataGet.nvertices=0; topoDataGet.nedges=0; int rowPtrGet[8]; rowPtrGet[0]=0; rowPtrGet[1]=0; rowPtrGet[2]=0; rowPtrGet[3]=0; rowPtrGet[4]=0; rowPtrGet[5]=0; rowPtrGet[6]=0; rowPtrGet[7]=0; topoDataGet.source_offsets=rowPtrGet; topoDataGet.destination_indices=NULL; status=nvgraphGetGraphStructure(handle, descrG, (void *)&topoDataGet, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(topoData.nvertices, topoDataGet.nvertices); ASSERT_EQ(topoData.nedges, topoDataGet.nedges); ASSERT_EQ(rowPtr[0], rowPtrGet[0]); ASSERT_EQ(rowPtr[1], rowPtrGet[1]); ASSERT_EQ(rowPtr[2], rowPtrGet[2]); ASSERT_EQ(rowPtr[3], rowPtrGet[3]); ASSERT_EQ(rowPtr[4], rowPtrGet[4]); ASSERT_EQ(rowPtr[5], rowPtrGet[5]); ASSERT_EQ(rowPtr[6], rowPtrGet[6]); ASSERT_EQ(rowPtr[7], rowPtrGet[7]); // Check topoGet topoDataGet.nvertices=0; topoDataGet.nedges=0; rowPtrGet[0]=0; rowPtrGet[1]=0; rowPtrGet[2]=0; rowPtrGet[3]=0; rowPtrGet[4]=0; rowPtrGet[5]=0; rowPtrGet[6]=0; rowPtrGet[7]=0; int colIndGet[11]; colIndGet[0]=0; colIndGet[1]=0; colIndGet[2]=0; colIndGet[3]=0; colIndGet[4]=0; colIndGet[5]=0; colIndGet[6]=0; colIndGet[7]=0; colIndGet[8]=0; colIndGet[9]=0; colIndGet[10]=0; topoDataGet.source_offsets=rowPtrGet; topoDataGet.destination_indices=colIndGet; status=nvgraphGetGraphStructure(handle, descrG, (void *)&topoDataGet, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(topoData.nvertices, topoDataGet.nvertices); ASSERT_EQ(topoData.nedges, topoDataGet.nedges); ASSERT_EQ(rowPtr[0], rowPtrGet[0]); ASSERT_EQ(rowPtr[1], rowPtrGet[1]); ASSERT_EQ(rowPtr[2], rowPtrGet[2]); ASSERT_EQ(rowPtr[3], rowPtrGet[3]); ASSERT_EQ(rowPtr[4], rowPtrGet[4]); ASSERT_EQ(rowPtr[5], rowPtrGet[5]); ASSERT_EQ(rowPtr[6], rowPtrGet[6]); ASSERT_EQ(rowPtr[7], rowPtrGet[7]); ASSERT_EQ(colInd[0], colIndGet[0]); ASSERT_EQ(colInd[1], colIndGet[1]); ASSERT_EQ(colInd[2], colIndGet[2]); ASSERT_EQ(colInd[3], colIndGet[3]); ASSERT_EQ(colInd[4], colIndGet[4]); ASSERT_EQ(colInd[5], colIndGet[5]); ASSERT_EQ(colInd[6], colIndGet[6]); ASSERT_EQ(colInd[7], colIndGet[7]); ASSERT_EQ(colInd[8], colIndGet[8]); ASSERT_EQ(colInd[9], colIndGet[9]); ASSERT_EQ(colInd[10], colIndGet[10]); // Check all TType=NVGRAPH_CSC_32; topoDataGet.nvertices=0; topoDataGet.nedges=0; rowPtrGet[0]=0; rowPtrGet[1]=0; rowPtrGet[2]=0; rowPtrGet[3]=0; rowPtrGet[4]=0; rowPtrGet[5]=0; rowPtrGet[6]=0; rowPtrGet[7]=0; colIndGet[0]=0; colIndGet[1]=0; colIndGet[2]=0; colIndGet[3]=0; colIndGet[4]=0; colIndGet[5]=0; colIndGet[6]=0; colIndGet[7]=0; colIndGet[8]=0; colIndGet[9]=0; colIndGet[10]=0; topoDataGet.source_offsets=rowPtrGet; topoDataGet.destination_indices=colIndGet; status=nvgraphGetGraphStructure(handle, descrG, (void *)&topoDataGet, &TType); 
ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(NVGRAPH_CSR_32, TType); ASSERT_EQ(topoData.nvertices, topoDataGet.nvertices); ASSERT_EQ(topoData.nedges, topoDataGet.nedges); ASSERT_EQ(rowPtr[0], rowPtrGet[0]); ASSERT_EQ(rowPtr[1], rowPtrGet[1]); ASSERT_EQ(rowPtr[2], rowPtrGet[2]); ASSERT_EQ(rowPtr[3], rowPtrGet[3]); ASSERT_EQ(rowPtr[4], rowPtrGet[4]); ASSERT_EQ(rowPtr[5], rowPtrGet[5]); ASSERT_EQ(rowPtr[6], rowPtrGet[6]); ASSERT_EQ(rowPtr[7], rowPtrGet[7]); ASSERT_EQ(colInd[0], colIndGet[0]); ASSERT_EQ(colInd[1], colIndGet[1]); ASSERT_EQ(colInd[2], colIndGet[2]); ASSERT_EQ(colInd[3], colIndGet[3]); ASSERT_EQ(colInd[4], colIndGet[4]); ASSERT_EQ(colInd[5], colIndGet[5]); ASSERT_EQ(colInd[6], colIndGet[6]); ASSERT_EQ(colInd[7], colIndGet[7]); ASSERT_EQ(colInd[8], colIndGet[8]); ASSERT_EQ(colInd[9], colIndGet[9]); ASSERT_EQ(colInd[10], colIndGet[10]); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphGraphDescrSetGetTopologyCSC) { nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // 1, 0, 0, 0, 0, 0, 0 // 0, 1, 0, 0, 0, 0, 0 // 0, 0, 0, 0, 0, 0, 0 // 1, 0, 0, 0, 0, 0, 1 // 1, 1, 1, 0, 0, 0, 0 // 0, 0, 0, 0, 0, 0, 0 // 1, 1, 1, 0, 0, 0, 1 // offsets=[0 4 7 9 9 9 9 11] // indices=[0 3 4 6 1 4 6 4 6 3 6] // n=7 // nnz=11 int rowInd[] = {0, 3, 4, 6, 1, 4, 6, 4, 6, 3, 6}; int colPtr[] = {0, 4, 7, 9, 9, 9, 9, 11}; nvgraphCSCTopology32I_st topoData; topoData.nedges = 11; // nnz topoData.nvertices = 7; // n topoData.destination_offsets = colPtr; topoData.source_indices = rowInd; status=nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status=nvgraphGetGraphStructure(handle, descrG, NULL, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // Check TType return value nvgraphTopologyType_t TType; status=nvgraphGetGraphStructure(handle, descrG, NULL, &TType); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(NVGRAPH_CSR_32, TType); // Check topoGet nedges and nvertices nvgraphCSCTopology32I_st topoDataGet; topoDataGet.nvertices=0; topoDataGet.nedges=0; topoDataGet.destination_offsets=NULL; topoDataGet.source_indices=NULL; status=nvgraphGetGraphStructure(handle, descrG, (void *)&topoDataGet, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(topoData.nvertices, topoDataGet.nvertices); ASSERT_EQ(topoData.nedges, topoDataGet.nedges); // Check topoGet nedges, nvertices and offsets topoDataGet.nvertices=0; topoDataGet.nedges=0; int colPtrGet[8]; colPtrGet[0]=0; colPtrGet[1]=0; colPtrGet[2]=0; colPtrGet[3]=0; colPtrGet[4]=0; colPtrGet[5]=0; colPtrGet[6]=0; colPtrGet[7]=0; topoDataGet.destination_offsets=colPtrGet; topoDataGet.source_indices=NULL; status=nvgraphGetGraphStructure(handle, descrG, (void *)&topoDataGet, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(topoData.nvertices, topoDataGet.nvertices); ASSERT_EQ(topoData.nedges, topoDataGet.nedges); ASSERT_EQ(colPtr[0], colPtrGet[0]); ASSERT_EQ(colPtr[1], colPtrGet[1]); ASSERT_EQ(colPtr[2], colPtrGet[2]); ASSERT_EQ(colPtr[3], colPtrGet[3]); ASSERT_EQ(colPtr[4], colPtrGet[4]); ASSERT_EQ(colPtr[5], colPtrGet[5]); ASSERT_EQ(colPtr[6], colPtrGet[6]); ASSERT_EQ(colPtr[7], colPtrGet[7]); // Check topoGet topoDataGet.nvertices=0; topoDataGet.nedges=0; colPtrGet[0]=0; colPtrGet[1]=0; colPtrGet[2]=0; colPtrGet[3]=0; colPtrGet[4]=0; colPtrGet[5]=0; colPtrGet[6]=0; colPtrGet[7]=0; int rowIndGet[11]; rowIndGet[0]=0; 
rowIndGet[1]=0; rowIndGet[2]=0; rowIndGet[3]=0; rowIndGet[4]=0; rowIndGet[5]=0; rowIndGet[6]=0; rowIndGet[7]=0; rowIndGet[8]=0; rowIndGet[9]=0; rowIndGet[10]=0; topoDataGet.destination_offsets=colPtrGet; topoDataGet.source_indices=rowIndGet; status=nvgraphGetGraphStructure(handle, descrG, (void *)&topoDataGet, NULL); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(topoData.nvertices, topoDataGet.nvertices); ASSERT_EQ(topoData.nedges, topoDataGet.nedges); ASSERT_EQ(colPtr[0], colPtrGet[0]); ASSERT_EQ(colPtr[1], colPtrGet[1]); ASSERT_EQ(colPtr[2], colPtrGet[2]); ASSERT_EQ(colPtr[3], colPtrGet[3]); ASSERT_EQ(colPtr[4], colPtrGet[4]); ASSERT_EQ(colPtr[5], colPtrGet[5]); ASSERT_EQ(colPtr[6], colPtrGet[6]); ASSERT_EQ(colPtr[7], colPtrGet[7]); ASSERT_EQ(rowInd[0], rowIndGet[0]); ASSERT_EQ(rowInd[1], rowIndGet[1]); ASSERT_EQ(rowInd[2], rowIndGet[2]); ASSERT_EQ(rowInd[3], rowIndGet[3]); ASSERT_EQ(rowInd[4], rowIndGet[4]); ASSERT_EQ(rowInd[5], rowIndGet[5]); ASSERT_EQ(rowInd[6], rowIndGet[6]); ASSERT_EQ(rowInd[7], rowIndGet[7]); ASSERT_EQ(rowInd[8], rowIndGet[8]); ASSERT_EQ(rowInd[9], rowIndGet[9]); ASSERT_EQ(rowInd[10], rowIndGet[10]); // Check all TType=NVGRAPH_CSC_32; topoDataGet.nvertices=0; topoDataGet.nedges=0; colPtrGet[0]=0; colPtrGet[1]=0; colPtrGet[2]=0; colPtrGet[3]=0; colPtrGet[4]=0; colPtrGet[5]=0; colPtrGet[6]=0; colPtrGet[7]=0; rowIndGet[0]=0; rowIndGet[1]=0; rowIndGet[2]=0; rowIndGet[3]=0; rowIndGet[4]=0; rowIndGet[5]=0; rowIndGet[6]=0; rowIndGet[7]=0; rowIndGet[8]=0; rowIndGet[9]=0; rowIndGet[10]=0; topoDataGet.destination_offsets=colPtrGet; topoDataGet.source_indices=rowIndGet; status=nvgraphGetGraphStructure(handle, descrG, (void *)&topoDataGet, &TType); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ(NVGRAPH_CSR_32, TType); ASSERT_EQ(topoData.nvertices, topoDataGet.nvertices); ASSERT_EQ(topoData.nedges, topoDataGet.nedges); ASSERT_EQ(colPtr[0], colPtrGet[0]); ASSERT_EQ(colPtr[1], colPtrGet[1]); ASSERT_EQ(colPtr[2], colPtrGet[2]); ASSERT_EQ(colPtr[3], colPtrGet[3]); ASSERT_EQ(colPtr[4], colPtrGet[4]); ASSERT_EQ(colPtr[5], colPtrGet[5]); ASSERT_EQ(colPtr[6], colPtrGet[6]); ASSERT_EQ(colPtr[7], colPtrGet[7]); ASSERT_EQ(rowInd[0], rowIndGet[0]); ASSERT_EQ(rowInd[1], rowIndGet[1]); ASSERT_EQ(rowInd[2], rowIndGet[2]); ASSERT_EQ(rowInd[3], rowIndGet[3]); ASSERT_EQ(rowInd[4], rowIndGet[4]); ASSERT_EQ(rowInd[5], rowIndGet[5]); ASSERT_EQ(rowInd[6], rowIndGet[6]); ASSERT_EQ(rowInd[7], rowIndGet[7]); ASSERT_EQ(rowInd[8], rowIndGet[8]); ASSERT_EQ(rowInd[9], rowIndGet[9]); ASSERT_EQ(rowInd[10], rowIndGet[10]); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphGraphDescrSetGetVertexDataSingleFloat) { typedef float T; nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /* Create topology before we load data */ createTopo(); status = nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T *vertexvals; vertexvals = (T *) malloc(4*sizeof(T)); vertexvals[0]=0.1; vertexvals[1]=2.0; vertexvals[2]=3.14; vertexvals[3]=0; // size_t numsets=1; cudaDataType_t type_v[1] = {sizeof(T) > 4 ? 
CUDA_R_64F : CUDA_R_32F}; status = nvgraphAllocateVertexData(handle, descrG, 1, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, descrG, (void *)vertexvals, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T *getvals; getvals = (T *)malloc(4*sizeof(T)); status = nvgraphGetVertexData(handle, descrG, (void *)getvals, 0); ASSERT_EQ( getvals[0], vertexvals[0]); ASSERT_EQ( getvals[1], vertexvals[1]); ASSERT_EQ( getvals[2], vertexvals[2]); ASSERT_EQ( getvals[3], vertexvals[3]); free(vertexvals); free(getvals); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphSetGetVertexDataSingleDouble) { typedef double T; nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /* Create topology before we load data */ createTopo(); status = nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T *vertexvals; vertexvals = (T *) malloc(4*sizeof(T)); vertexvals[0]=0.1; vertexvals[1]=2.0; vertexvals[2]=3.14; vertexvals[3]=0; // size_t numsets=1; cudaDataType_t type_v[1] = {sizeof(T) > 4 ? CUDA_R_64F : CUDA_R_32F}; status = nvgraphAllocateVertexData(handle, descrG, 1, type_v); // nvgraph::Graph<int> *G = static_cast<nvgraph::Graph<int>*> (descrG->graph_handle); //status = nvgraphSetVertexData(handle, descrG, (void **)&vertexvals, numsets, type_v ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, descrG, (void *)vertexvals, 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T *getvals; getvals = (T *)malloc(4*sizeof(T)); status = nvgraphGetVertexData(handle, descrG, (void *)getvals, 0); ASSERT_EQ( getvals[0], vertexvals[0]); ASSERT_EQ( getvals[1], vertexvals[1]); ASSERT_EQ( getvals[2], vertexvals[2]); ASSERT_EQ( getvals[3], vertexvals[3]); free(vertexvals); free(getvals); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphSetGetVertexData_CornerCases) { nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /* Create topology before we load data */ createTopo(); status = nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); double vertexvals0[2] = {0.1, 1e21}; float vertexvals1[2] = {0.1f, 1e21f}; void* vertexptr[2] = {(void*) vertexvals0, (void*)vertexvals1}; size_t numsets=2; cudaDataType_t type_v[2] = {CUDA_R_64F, CUDA_R_32F}; status = nvgraphAllocateVertexData(handle, descrG, 1, type_v); status = nvgraphSetVertexData(NULL, descrG, (void *)vertexptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSetVertexData(handle, NULL, (void *)vertexptr[0], 0 ); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSetVertexData(handle, descrG, NULL, numsets ); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); // probably should be a success status = nvgraphSetVertexData(handle, descrG, (void **)&vertexptr, 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetVertexData(handle, descrG, (void **)&vertexptr, numsets ); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); { // type mismatch // double edge_data0 = 0.; // float edge_data1 =1.; // void* edge_ptr_bad[] = {(void*)&edge_data0, (void*)&edge_data1}; // cudaDataType_t type_bad[2] = {CUDA_R_32F, CUDA_R_32F}; //status = 
nvgraphSetEdgeData(handle, descrG, (void **)edge_ptr_bad, numsets ); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, status); } float getdoublevals0[2]; // double getdoublevals1[2]; status = nvgraphGetVertexData(NULL, descrG, (void *)getdoublevals0, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphGetVertexData(handle, NULL, (void *)getdoublevals0, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphGetVertexData(handle, descrG, (void *)NULL, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphGetVertexData(handle, descrG, (void *)getdoublevals0, 10); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphSetGetVertexDataMulti) { nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /* Create topology data */ createTopo(); status = nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); // size_t numsets=3; cudaDataType_t type_v[3] = {CUDA_R_32F, CUDA_R_64F, CUDA_R_32F}; void **vertexvals; vertexvals = (void **)malloc(3*sizeof( void * )); vertexvals[0] = (float *) malloc(4*sizeof(float)); ((float *)vertexvals[0])[0]=0.1; ((float *)vertexvals[0])[1]=2.0; ((float *)vertexvals[0])[2]=3.14; ((float *)vertexvals[0])[3]=0; vertexvals[1] = (double *)malloc(4*sizeof(double)); ((double *)vertexvals[1])[0]=1.1e-10; ((double *)vertexvals[1])[1]=2.0e20; ((double *)vertexvals[1])[2]=3.14e-26; ((double *)vertexvals[1])[3]=0.34e3; vertexvals[2] = (float *)malloc(4*sizeof(float)); ((float *)vertexvals[2])[0]=1.1e-1; ((float *)vertexvals[2])[1]=2.0e2; ((float *)vertexvals[2])[2]=3.14e-2; ((float *)vertexvals[2])[3]=0.34e6; status = nvgraphAllocateVertexData(handle, descrG, 1, type_v); float *getfloatvals; getfloatvals = (float *)malloc(4*sizeof(float)); status = nvgraphSetVertexData(handle, descrG, (void *)vertexvals[0], 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphGetVertexData(handle, descrG, (void *)getfloatvals, 0); float *float_data=((float *)vertexvals[0]); ASSERT_EQ( (float)getfloatvals[0], float_data[0]); ASSERT_EQ( (float)getfloatvals[1], float_data[1]); ASSERT_EQ( (float)getfloatvals[2], float_data[2]); ASSERT_EQ( (float)getfloatvals[3], float_data[3]); double *getdoublevals; getdoublevals = (double *)malloc(4*sizeof(double)); status = nvgraphSetVertexData(handle, descrG, (void *)vertexvals[1], 1); status = nvgraphGetVertexData(handle, descrG, (void *)getdoublevals, 1); // double *double_data=((double *)vertexvals[1]); //ASSERT_EQ( (double)getdoublevals[0], double_data[0]); //ASSERT_EQ( (double)getdoublevals[1], double_data[1]); //ASSERT_EQ( (double)getdoublevals[2], double_data[2]); //ASSERT_EQ( (double)getdoublevals[3], double_data[3]); free(vertexvals[0]); free(vertexvals[1]); free(vertexvals[2]); free(vertexvals); free(getfloatvals); free(getdoublevals); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphSetGetEdgeDataSingleFloat) { typedef float T; nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /* Create topology */ createTopo(); status = nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T *edgevals; edgevals = (T *) malloc(5*sizeof(T)); edgevals[0]=0.1; 
edgevals[1]=2.0; edgevals[2]=3.14; edgevals[3]=0; edgevals[4]=10101.10101; // size_t numsets=1; cudaDataType_t type_v[1] = {sizeof(T) > 4 ? CUDA_R_64F : CUDA_R_32F}; status = nvgraphAllocateEdgeData(handle, descrG, 1, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, descrG, (void *)edgevals, 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T *getvals; getvals = (T *)malloc(5*sizeof(T)); status = nvgraphGetEdgeData(handle, descrG, (void *)getvals, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ( getvals[0], edgevals[0]); ASSERT_EQ( getvals[1], edgevals[1]); ASSERT_EQ( getvals[2], edgevals[2]); ASSERT_EQ( getvals[3], edgevals[3]); ASSERT_EQ( getvals[4], edgevals[4]); free(edgevals); free(getvals); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphSetGetEdgeDataSingleDouble) { typedef double T; nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /* Create topology */ createTopo(); status = nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T *edgevals; edgevals = (T *) malloc(5*sizeof(T)); edgevals[0]=0.1; edgevals[1]=2.0; edgevals[2]=3.14; edgevals[3]=0; edgevals[4]=10101.10101; // size_t numsets=1; cudaDataType_t type_v[1] = {sizeof(T) > 4 ? CUDA_R_64F : CUDA_R_32F}; status = nvgraphAllocateEdgeData(handle, descrG, 1, type_v); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(handle, descrG, (void *)edgevals, 0 ); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); T *getvals; getvals = (T *)malloc(5*sizeof(T)); status = nvgraphGetEdgeData(handle, descrG, (void *)getvals, 0); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); ASSERT_EQ( getvals[0], edgevals[0]); ASSERT_EQ( getvals[1], edgevals[1]); ASSERT_EQ( getvals[2], edgevals[2]); ASSERT_EQ( getvals[3], edgevals[3]); ASSERT_EQ( getvals[4], edgevals[4]); free(edgevals); free(getvals); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } TEST_F(NvgraphAPITest,NvgraphSetGetEdgeData_CornerCases) { nvgraphGraphDescr_t descrG=NULL; status = nvgraphCreateGraphDescr(handle, &descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); /* Create topology */ createTopo(); status = nvgraphSetGraphStructure(handle, descrG, (void *)&topoData, NVGRAPH_CSR_32); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); double edgevals0[1] = {0.1}; float edgevals1[1] = {0.1f}; void* edgeptr[2] = {(void*) edgevals0, (void*)edgevals1}; // size_t numsets=2; cudaDataType_t type_e[2] = {CUDA_R_64F, CUDA_R_32F}; status = nvgraphAllocateEdgeData(handle, descrG, 1, type_e); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); status = nvgraphSetEdgeData(NULL, descrG, edgeptr, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSetEdgeData(handle, NULL, edgeptr, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphSetEdgeData(handle, descrG, NULL, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); //status = nvgraphSetEdgeData(handle, descrG, edgeptr, 0); //ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); { // type mismatch // double vertexvals0[2] = {0.1, 1e21}; // float vertexvals1[2] = {0.1f, 1e21f}; // void* vertexptr_bad[2] = {(void*) vertexvals0, (void*)vertexvals1}; // cudaDataType_t type_bad[2] = {CUDA_R_32F, CUDA_R_32F}; //status = nvgraphSetVertexData(handle, descrG, (void **)vertexptr_bad, numsets, type_bad ); ASSERT_NE(NVGRAPH_STATUS_SUCCESS, 
status); } // float getdoublevals0[2]; // double getdoublevals1[2]; //status = nvgraphGetEdgeData(NULL, descrG, (void *)getdoublevals0, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); //status = nvgraphGetEdgeData(handle, NULL, (void *)getdoublevals0, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); //status = nvgraphGetEdgeData(handle, descrG, NULL, 0); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); //status = nvgraphGetEdgeData(handle, descrG, (void *)getdoublevals0, 10); ASSERT_EQ(NVGRAPH_STATUS_INVALID_VALUE, status); status = nvgraphDestroyGraphDescr(handle,descrG); ASSERT_EQ(NVGRAPH_STATUS_SUCCESS, status); } int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
0
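Condensed from the API tests above, a hedged sketch of the basic descriptor lifecycle: create a handle and descriptor, attach a CSR topology, round-trip one vertex data set, then tear everything down. The topology and values reuse what createTopo() and the float vertex-data test set up; status checks are omitted for brevity.

// Sketch assembled from the calls exercised in nvgraph_test.cpp above; not a new API.
#include <cstdio>
#include "nvgraph.h"

int descriptor_lifecycle_sketch()
{
    nvgraphHandle_t handle = NULL;
    nvgraphGraphDescr_t descrG = NULL;
    nvgraphCreate(&handle);
    nvgraphCreateGraphDescr(handle, &descrG);

    // Same 4-vertex / 5-edge CSR topology used by createTopo() in the tests.
    int offsets[6]      = {0, 1, 3, 4, 5, 5};
    int neighborhood[5] = {0, 2, 3, 4, 4};
    nvgraphCSRTopology32I_st topoData;
    topoData.nvertices = 4;
    topoData.nedges = 5;
    topoData.source_offsets = offsets;
    topoData.destination_indices = neighborhood;
    nvgraphSetGraphStructure(handle, descrG, (void*)&topoData, NVGRAPH_CSR_32);

    // One float vertex data set: allocate, set from host, read back.
    float vertexvals[4] = {0.1f, 2.0f, 3.14f, 0.0f};
    float getvals[4];
    cudaDataType_t type_v[1] = {CUDA_R_32F};
    nvgraphAllocateVertexData(handle, descrG, 1, type_v);
    nvgraphSetVertexData(handle, descrG, (void*)vertexvals, 0);
    nvgraphGetVertexData(handle, descrG, (void*)getvals, 0);
    printf("round-tripped vertex 2: %f\n", getvals[2]);

    nvgraphDestroyGraphDescr(handle, descrG);
    nvgraphDestroy(handle);
    return 0;
}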
rapidsai_public_repos/nvgraph/cpp/tests
rapidsai_public_repos/nvgraph/cpp/tests/benchmarkScripts/run_nvgraph.sh
#!/bin/bash # ****************** Edit this ************************* #Path to nvgraph bin graphs # From p4matrices:2024 sync //matrices/p4matrices/graphs/... nvg_data_prefix="/home/afender/src/matrices/p4matrices/graphs" #Path to nvgraph # nvg_bin_prefix should contain a release build of nvgraph's ToT (from p4sw //sw/gpgpu/nvgraph/...) # and nvgraph_benchmark executable which is build along with nvgraph's tests nvg_bin_prefix="/home/afender/src/sw/sw/gpgpu/bin/x86_64_Linux_release" # ***************************************************** export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$nvg_bin_prefix export PATH=$PATH:$nvg_bin_prefix declare -a arr=( "$nvg_data_prefix/webbase1M/webbase-1M_T.mtx.bin" "$nvg_data_prefix/liveJournal/ljournal-2008_T.mtx.bin" "$nvg_data_prefix/webGoogle/web-Google_T.mtx.bin" "$nvg_data_prefix/citPatents/cit-Patents_T.mtx.bin" "$nvg_data_prefix/webBerkStan/web-BerkStan_T.mtx.bin" "$nvg_data_prefix/WikiTalk/wiki-Talk_T.mtx.bin" "$nvg_data_prefix/soc-liveJournal/soc-LiveJournal1_T.mtx.bin" # Warning : Twitter case works only on GPU with more than 12 GB of memory "$nvg_data_prefix/Twitter/twitter.bin" #Just for debug #"$nvg_data_prefix/small/small.bin" ) ## now loop through the above array for i in "${arr[@]}" do echo "Pagerank" echo "$i" echo "single precision" $nvg_bin_prefix/nvgraph_benchmark --pagerank "$i" 0.85 500 1E-6 --float --repeats 10 echo #echo "Pagerank" #echo "$i" #echo "double precision" #$nvg_bin_prefix/nvgraph_benchmark --pagerank "$i" 0.85 500 1E-6 --double --repeats 10 #echo done echo for i in "${arr[@]}" do echo "SSSP" echo "$i" echo "single precision" $nvg_bin_prefix/nvgraph_benchmark --sssp "$i" 0 --float --repeats 10 echo #echo "SSSP" #echo "$i" #echo "double precision" #$nvg_bin_prefix/nvgraph_benchmark --sssp "$i" 0 --double --repeats 10 #echo done echo for i in "${arr[@]}" do echo "Widest Path" echo "$i" echo "single precision" $nvg_bin_prefix/nvgraph_benchmark --widest "$i" 0 --float --repeats 10 echo #echo "Widest Path" #echo "$i" #echo "double precision" #$nvg_bin_prefix/nvgraph_benchmark --widest "$i" 0 --double --repeats 10 #echo done echo
0
rapidsai_public_repos/nvgraph/cpp/tests
rapidsai_public_repos/nvgraph/cpp/tests/benchmarkScripts/modularity_paper.sh
#!/bin/bash # ****************** Edit this ************************* #Path to nvgraph bin graphs # From p4matrices:2024 sync //matrices/p4matrices/dimacs10/... nvg_data_prefix="/home/mnaumov/cuda_matrices/p4matrices/dimacs10" #nvg_data_prefix="/home/afender/modularity/mat" #Path to nvgraph # nvg_bin_prefix should contain a release build of nvgraph's ToT (from p4sw //sw/gpgpu/nvgraph/...) # and nvgraph_benchmark executable which is build along with nvgraph's tests nvg_bin_prefix="/home/afender/modularity/sw/gpgpu/bin/x86_64_Linux_release" # ***************************************************** export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$nvg_bin_prefix export PATH=$PATH:$nvg_bin_prefix declare -a dataset=( "$nvg_data_prefix/preferentialAttachment.mtx" "$nvg_data_prefix/caidaRouterLevel.mtx" "$nvg_data_prefix/coAuthorsDBLP.mtx" "$nvg_data_prefix/citationCiteseer.mtx" "$nvg_data_prefix/coPapersDBLP.mtx" "$nvg_data_prefix/coPapersCiteseer.mtx" "/home/afender/modularity/as-Skitter.mtx" "/home/afender/modularity/hollywood-2009.mtx" #"$nvg_data_prefix/data.mtx" #"/home/afender/modularity/karate.mtx" #"$nvg_data_prefix/road_central.mtx" #"$nvg_data_prefix/road_usa.mtx" #"$nvg_data_prefix/rgg_n_2_23_s0.mtx" ) #One particular number of cluster for i in "${dataset[@]}" do $nvg_bin_prefix/nvgraph_benchmark --modularity "$i" 7 7 --double --repeats 4 done echo for i in "${dataset[@]}" do $nvg_bin_prefix/nvgraph_benchmark --modularity "$i" 7 7 --float --repeats 4 done echo #Spreadsheet 1 #declare -ia clusters=(2 3 5 7 11 17 19 23 29 31 37 41 43 47 53) #for i in "${dataset[@]}" #do # for j in "${clusters[@]}" # do # if [ $j -lt 10 ] # then # $nvg_bin_prefix/nvgraph_benchmark --modularity "$i" $j $j --double --repeats 4 # else # $nvg_bin_prefix/nvgraph_benchmark --modularity "$i" $j 7 --double --repeats 4 # fi # done # echo #done #echo #Spreadsheet 3 (same as 1 in single precision) #declare -ia clusters=(2 3 5 7 11 17 19 23 29 31 37 41 43 47 53) #for i in "${dataset[@]}" #do # for j in "${clusters[@]}" # do # if [ $j -lt 10 ] # then # $nvg_bin_prefix/nvgraph_benchmark --modularity "$i" $j $j --foat --repeats 4 # else # $nvg_bin_prefix/nvgraph_benchmark --modularity "$i" $j 7 --foat --repeats 4 # fi # done # echo #done #run only best case according to Spreadsheet 1 #$nvg_bin_prefix/nvgraph_benchmark --modularity "$nvg_data_prefix/preferentialAttachment.mtx" 7 7 --double --repeats 4 #$nvg_bin_prefix/nvgraph_benchmark --modularity "$nvg_data_prefix/caidaRouterLevel.mtx" 11 7 --double --repeats 4 #$nvg_bin_prefix/nvgraph_benchmark --modularity "$nvg_data_prefix/coAuthorsDBLP.mtx" 7 7 --double --repeats 4 #$nvg_bin_prefix/nvgraph_benchmark --modularity "$nvg_data_prefix/citationCiteseer.mtx" 17 7 --double --repeats 4 #$nvg_bin_prefix/nvgraph_benchmark --modularity "$nvg_data_prefix/coPapersDBLP.mtx" 73 7 --double --repeats 4 #$nvg_bin_prefix/nvgraph_benchmark --modularity "$nvg_data_prefix/coPapersCiteseer.mtx" 53 7 --double --repeats 4 #$nvg_bin_prefix/nvgraph_benchmark --modularity "/home/afender/modularity/as-Skitter.mtx" 7 7 --double --repeats 4 #$nvg_bin_prefix/nvgraph_benchmark --modularity "/home/afender/modularity/hollywood-2009.mtx" 11 7 --double --repeats 4 #Variation of the number of clusters and number of eigenpairs, independently on synthetic matrix #for (( i = 2; i <= 8; i++ )) #do # for (( j = $i ; j <= 32; j++ )) # do # $nvg_bin_prefix/nvgraph_benchmark --modularity "/home/afender/modularity/karate_5_block_dia.mtx" $j $i --double --repeats 3 # done # echo #done #echo #profiles #nvprof 
--profile-from-start off --export-profile coPapersDBLP.mtx_23clusters_3ev_32b.bin /home/afender/modularity/sw/gpgpu/bin/x86_64_Linux_release/nvgraph_benchmark --modularity "/home/mnaumov/cuda_matrices/p4matrices/dimacs10/coPapersDBLP.mtx" 23 3 --double --repeats 3 # /home/mnaumov/cuda_toolkit/cuda-linux64-mixed-rel-nightly/bin/nvprof --profile-from-start off --export-profile eigensolver_coPapersDBLP.mtx_4clusters_4ev_32b.bin /home/afender/modularity/sw/gpgpu/bin/x86_64_Linux_release/nvgraph_benchmark --modularity "/home/mnaumov/cuda_matrices/p4matrices/dimacs10/coPapersDBLP.mtx" 4 4 --double --repeats 1 # /home/mnaumov/cuda_toolkit/cuda-linux64-mixed-rel-nightly/bin/nvprof --profile-from-start off --export-profile total_coPapersDBLP.mtx_4clusters_4ev_32b.bin /home/afender/modularity/sw/gpgpu/bin/x86_64_Linux_release/nvgraph_benchmark --modularity "/home/mnaumov/cuda_matrices/p4matrices/dimacs10/coPapersDBLP.mtx" 4 4 --double --repeats 1 #small matrices #declare -a dataset_small=( #"$nvg_data_prefix/karate.mtx" #"$nvg_data_prefix/dolphins.mtx" ##"$nvg_data_prefix/chesapeake.mtx" #"$nvg_data_prefix/lesmis.mtx" #"$nvg_data_prefix/adjnoun.mtx" #"$nvg_data_prefix/polbooks.mtx" #"$nvg_data_prefix/football.mtx" #"$nvg_data_prefix/celegansneural.mtx" ##"$nvg_data_prefix/jazz.mtx" #"$nvg_data_prefix/netscience.mtx" ##"$nvg_data_prefix/email.mtx" #"$nvg_data_prefix/power.mtx" #"$nvg_data_prefix/hep-th.mtx" #"$nvg_data_prefix/polblogs.mtx" ##"$nvg_data_prefix/PGPgiantcompo.mtx" #"$nvg_data_prefix/cond-mat.mtx" #"$nvg_data_prefix/as-22july06.mtx" #"$nvg_data_prefix/cond-mat-2003.mtx" #"$nvg_data_prefix/astro-ph.mtx" #) #declare -ia clusters=(2 3 5 7 11 17 19 23 29 31) #for i in "${dataset_small[@]}" #do # for j in "${clusters[@]}" # do # if [ $j -lt 10 ] # then # $nvg_bin_prefix/nvgraph_benchmark --modularity "$i" $j $j --double --repeats 4 # else # $nvg_bin_prefix/nvgraph_benchmark --modularity "$i" $j 7 --double --repeats 4 # fi # done # echo #done #echo
0
rapidsai_public_repos/nvgraph/cpp/tests
rapidsai_public_repos/nvgraph/cpp/tests/benchmarkScripts/run_galois.sh
#!/bin/bash
# ****************** Edit this *************************
# Path to local workspace containing p4matrices:2024 sync //matrices/p4matrices/graphs/...
nvg_data_prefix="/home/afender/src/matrices/p4matrices/graphs"

#Path to galois
galois_root="/home/afender/soft/galois-2.3.0/build/default"

# *****************************************************
export OMP_NUM_THREADS=24

declare -a arr=(
  #Small mtx just for debug
  #"$nvg_data_prefix/small/small.mtx"
  "$nvg_data_prefix/soc-liveJournal/soc-LiveJournal1.mtx"
  "$nvg_data_prefix/Twitter/twitter.mtx"
)

## now loop through the above array
for i in "${arr[@]}"
do
  echo "Pagerank"
  echo "$i"
  time $galois_root/tools/graph-convert/graph-convert -mtx2gr -edgeType=float32 -print-all-options $i $i.galois
  time $galois_root/tools/graph-convert/graph-convert -gr2tgr -edgeType=float32 -print-all-options $i.galois ${i}_T.galois
  time $galois_root/apps/pagerank/app-pagerank $i.galois -graphTranspose="${i}_T.galois" -t=$OMP_NUM_THREADS
  echo
done
echo

for i in "${arr[@]}"
do
  echo "SSSP"
  echo "$i"
  time $galois_root/apps/sssp/app-sssp $i.galois -startNode=0 -t=$OMP_NUM_THREADS
  echo
done
echo

0
rapidsai_public_repos/nvgraph/cpp/tests
rapidsai_public_repos/nvgraph/cpp/tests/benchmarkScripts/run_graphMat.sh
#!/bin/bash
# ****************** Edit this *************************
#*******************************************************
#Path to graphMat binary data
gm_data_prefix="/home-2/afender/GraphMat-master/data"

#Path to graphMat binary
gm_bin_prefix="/home-2/afender/GraphMat-master/bin"

#Number of cores to use in graphMat
export OMP_NUM_THREADS=24
# ******************************************************
#*******************************************************
# NOTE
# twitter.graphmat.bin and soc-LiveJournal1.graphmat.bin are assumed to be in the "gm_data_prefix" directory
#*******************************************************

# Required export according to the doc
export KMP_AFFINITY=scatter

#Pagerank runs
numactl -i all $gm_bin_prefix/PageRank $gm_data_prefix/twitter.graphmat.bin
numactl -i all $gm_bin_prefix/PageRank $gm_data_prefix/soc-LiveJournal1.graphmat.bin

# SSSP runs
# Warning: vertices seem to use 1-based indices (nvGraph uses 0-based)
numactl -i all $gm_bin_prefix/SSSP $gm_data_prefix/twitter.graphmat.bin 1
numactl -i all $gm_bin_prefix/SSSP $gm_data_prefix/soc-LiveJournal1.graphmat.bin 1
0
rapidsai_public_repos/nvgraph/cpp/cmake
rapidsai_public_repos/nvgraph/cpp/cmake/Templates/GoogleTest.CMakeLists.txt.cmake
cmake_minimum_required(VERSION 3.12)

include(ExternalProject)

ExternalProject_Add(GoogleTest
    GIT_REPOSITORY    https://github.com/google/googletest.git
    GIT_TAG           release-1.8.0
    SOURCE_DIR        "${GTEST_ROOT}/googletest"
    BINARY_DIR        "${GTEST_ROOT}/build"
    INSTALL_DIR       "${GTEST_ROOT}/install"
    CMAKE_ARGS        ${GTEST_CMAKE_ARGS} -DCMAKE_INSTALL_PREFIX=${GTEST_ROOT}/install)
0
rapidsai_public_repos/nvgraph/cpp/cmake
rapidsai_public_repos/nvgraph/cpp/cmake/Modules/ConfigureGoogleTest.cmake
set(GTEST_ROOT "${CMAKE_BINARY_DIR}/googletest") set(GTEST_CMAKE_ARGS "") #" -Dgtest_build_samples=ON" #" -DCMAKE_VERBOSE_MAKEFILE=ON") if(NOT CMAKE_CXX11_ABI) message(STATUS "GTEST: Disabling the GLIBCXX11 ABI") list(APPEND GTEST_CMAKE_ARGS " -DCMAKE_C_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=0") list(APPEND GTEST_CMAKE_ARGS " -DCMAKE_CXX_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=0") elseif(CMAKE_CXX11_ABI) message(STATUS "GTEST: Enabling the GLIBCXX11 ABI") list(APPEND GTEST_CMAKE_ARGS " -DCMAKE_C_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=1") list(APPEND GTEST_CMAKE_ARGS " -DCMAKE_CXX_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=1") endif(NOT CMAKE_CXX11_ABI) configure_file("${CMAKE_SOURCE_DIR}/cmake/Templates/GoogleTest.CMakeLists.txt.cmake" "${GTEST_ROOT}/CMakeLists.txt") file(MAKE_DIRECTORY "${GTEST_ROOT}/build") file(MAKE_DIRECTORY "${GTEST_ROOT}/install") execute_process(COMMAND ${CMAKE_COMMAND} -G ${CMAKE_GENERATOR} . RESULT_VARIABLE GTEST_CONFIG WORKING_DIRECTORY ${GTEST_ROOT}) if(GTEST_CONFIG) message(FATAL_ERROR "Configuring GoogleTest failed: " ${GTEST_CONFIG}) endif(GTEST_CONFIG) set(PARALLEL_BUILD -j) if($ENV{PARALLEL_LEVEL}) set(NUM_JOBS $ENV{PARALLEL_LEVEL}) set(PARALLEL_BUILD "${PARALLEL_BUILD}${NUM_JOBS}") endif($ENV{PARALLEL_LEVEL}) if(${NUM_JOBS}) if(${NUM_JOBS} EQUAL 1) message(STATUS "GTEST BUILD: Enabling Sequential CMake build") elseif(${NUM_JOBS} GREATER 1) message(STATUS "GTEST BUILD: Enabling Parallel CMake build with ${NUM_JOBS} jobs") endif(${NUM_JOBS} EQUAL 1) else() message(STATUS "GTEST BUILD: Enabling Parallel CMake build with all threads") endif(${NUM_JOBS}) execute_process(COMMAND ${CMAKE_COMMAND} --build .. -- ${PARALLEL_BUILD} RESULT_VARIABLE GTEST_BUILD WORKING_DIRECTORY ${GTEST_ROOT}/build) if(GTEST_BUILD) message(FATAL_ERROR "Building GoogleTest failed: " ${GTEST_BUILD}) endif(GTEST_BUILD) message(STATUS "GoogleTest installed here: " ${GTEST_ROOT}/install) set(GTEST_INCLUDE_DIR "${GTEST_ROOT}/install/include") set(GTEST_LIBRARY_DIR "${GTEST_ROOT}/install/lib") set(GTEST_FOUND TRUE)
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/nvgraph_vector_kernels.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include "nvgraph_error.hxx" #include "nvgraph_vector_kernels.hxx" #include "debug_macros.h" namespace nvgraph { void check_size(size_t sz) { if (sz>INT_MAX) FatalError("Vector larger than INT_MAX", NVGRAPH_ERR_BAD_PARAMETERS); } template <typename ValueType_> void nrm1_raw_vec (ValueType_* vec, size_t n, ValueType_* res, cudaStream_t stream) { thrust::device_ptr<ValueType_> dev_ptr(vec); *res = thrust::reduce(dev_ptr, dev_ptr+n); cudaCheckError(); } template <typename ValueType_> void fill_raw_vec (ValueType_* vec, size_t n , ValueType_ value, cudaStream_t stream) { thrust::device_ptr<ValueType_> dev_ptr(vec); thrust::fill(dev_ptr, dev_ptr + n, value); cudaCheckError(); } template <typename ValueType_> void dump_raw_vec (ValueType_* vec, size_t n, int offset, cudaStream_t stream) { #ifdef DEBUG thrust::device_ptr<ValueType_> dev_ptr(vec); COUT().precision(15); COUT() << "sample size = "<< n << ", offset = "<< offset << std::endl; thrust::copy(dev_ptr+offset,dev_ptr+offset+n, std::ostream_iterator<ValueType_>(COUT(), " ")); cudaCheckError(); COUT() << std::endl; #endif } template <typename ValueType_> __global__ void flag_zeroes_kernel(int num_vertices, ValueType_* vec, int* flags) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int r = tidx; r < num_vertices; r += blockDim.x * gridDim.x) { if (vec[r] != 0.0) flags[r] = 1; // NOTE 2 : alpha*0 + (1-alpha)*1 = (1-alpha) else flags[r] = 0; } } template <typename ValueType_> __global__ void dmv0_kernel(const ValueType_ * __restrict__ D, const ValueType_ * __restrict__ x, ValueType_ * __restrict__ y, int n) { //y=D*x int tidx = blockIdx.x*blockDim.x + threadIdx.x ; for (int i = tidx; i < n; i += blockDim.x * gridDim.x) y[i] = D[i]*x[i]; } template <typename ValueType_> __global__ void dmv1_kernel(const ValueType_ * __restrict__ D, const ValueType_ * __restrict__ x, ValueType_ * __restrict__ y, int n) { // y+=D*x int tidx = blockIdx.x*blockDim.x + threadIdx.x ; for (int i = tidx; i < n; i += blockDim.x * gridDim.x) y[i] += D[i]*x[i]; } template<typename ValueType_> void copy_vec(ValueType_ *vec1, size_t n, ValueType_ *res, cudaStream_t stream) { thrust::device_ptr<ValueType_> dev_ptr(vec1); thrust::device_ptr<ValueType_> res_ptr(res); #ifdef DEBUG //COUT() << "copy "<< n << " elements" << std::endl; #endif thrust::copy_n(dev_ptr, n, res_ptr); cudaCheckError(); //dump_raw_vec (res, n, 0); } template <typename ValueType_> void flag_zeros_raw_vec(size_t num_vertices, ValueType_* vec, int* flags, cudaStream_t stream) { int items_per_thread = 4; int num_threads = 128; int max_grid_size = 4096; check_size(num_vertices); int n = static_cast<int>(num_vertices); int num_blocks = std::min(max_grid_size, (n/(items_per_thread*num_threads))+1); flag_zeroes_kernel<<<num_blocks, num_threads, 0, stream>>>(num_vertices, vec, flags); cudaCheckError(); } template <typename ValueType_> void dmv 
(size_t num_vertices, ValueType_ alpha, ValueType_* D, ValueType_* x, ValueType_ beta, ValueType_* y, cudaStream_t stream) { int items_per_thread = 4; int num_threads = 128; int max_grid_size = 4096; check_size(num_vertices); int n = static_cast<int>(num_vertices); int num_blocks = std::min(max_grid_size, (n/(items_per_thread*num_threads))+1); if (alpha ==1.0 && beta == 0.0) dmv0_kernel<<<num_blocks, num_threads, 0, stream>>>(D, x, y, n); else if (alpha ==1.0 && beta == 1.0) dmv1_kernel<<<num_blocks, num_threads, 0, stream>>>(D, x, y, n); else FatalError("Not implemented case of y = D*x", NVGRAPH_ERR_BAD_PARAMETERS); cudaCheckError(); } template <typename IndexType_, typename ValueType_> void set_connectivity( size_t n, IndexType_ root, ValueType_ self_loop_val, ValueType_ unreachable_val, ValueType_* res, cudaStream_t stream) { fill_raw_vec(res, n, unreachable_val); cudaMemcpy(&res[root], &self_loop_val, sizeof(self_loop_val), cudaMemcpyHostToDevice); cudaCheckError(); } template void nrm1_raw_vec <float> (float* vec, size_t n, float* res, cudaStream_t stream); template void nrm1_raw_vec <double> (double* vec, size_t n, double* res, cudaStream_t stream); template void dmv <float>(size_t num_vertices, float alpha, float* D, float* x, float beta, float* y, cudaStream_t stream); template void dmv <double>(size_t num_vertices, double alpha, double* D, double* x, double beta, double* y, cudaStream_t stream); template void set_connectivity <int, float> (size_t n, int root, float self_loop_val, float unreachable_val, float* res, cudaStream_t stream); template void set_connectivity <int, double>(size_t n, int root, double self_loop_val, double unreachable_val, double* res, cudaStream_t stream); template void flag_zeros_raw_vec <float>(size_t num_vertices, float* vec, int* flags, cudaStream_t stream); template void flag_zeros_raw_vec <double>(size_t num_vertices, double* vec, int* flags, cudaStream_t stream); template void fill_raw_vec<float> (float* vec, size_t n, float value, cudaStream_t stream); template void fill_raw_vec<double> (double* vec, size_t n, double value, cudaStream_t stream); template void fill_raw_vec<int> (int* vec, size_t n, int value, cudaStream_t stream); template void fill_raw_vec<char> (char* vec, size_t n, char value, cudaStream_t stream); template void copy_vec<float>(float * vec1, size_t n, float *res, cudaStream_t stream); template void copy_vec<double>(double * vec1, size_t n, double *res, cudaStream_t stream); template void copy_vec<int>(int * vec1, size_t n, int *res, cudaStream_t stream); template void copy_vec<char>(char * vec1, size_t n, char *res, cudaStream_t stream); template void dump_raw_vec<float> (float* vec, size_t n, int off, cudaStream_t stream); template void dump_raw_vec<double> (double* vec, size_t n, int off, cudaStream_t stream); template void dump_raw_vec<int> (int* vec, size_t n, int off, cudaStream_t stream); template void dump_raw_vec<char> (char* vec, size_t n, int off, cudaStream_t stream); } // end namespace nvgraph
0
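A small host-side sketch of how the diagonal matrix-vector helper above could be driven. It assumes nvgraph::dmv is declared in nvgraph_vector_kernels.hxx (as the include at the top of the file suggests) and only uses the two alpha/beta combinations the implementation supports.

// Sketch: exercises nvgraph::dmv for the supported cases y = D*x and y += D*x.
#include <thrust/device_vector.h>
#include "nvgraph_vector_kernels.hxx"  // assumed to declare nvgraph::dmv

void dmv_usage_sketch()
{
    const size_t n = 4;
    thrust::device_vector<float> D(n, 2.0f);  // diagonal entries
    thrust::device_vector<float> x(n, 3.0f);
    thrust::device_vector<float> y(n, 1.0f);

    // alpha = 1, beta = 0 -> dmv0_kernel: y[i] = D[i] * x[i]   (y becomes 6)
    nvgraph::dmv<float>(n, 1.0f, D.data().get(), x.data().get(),
                        0.0f, y.data().get(), 0 /*default stream*/);

    // alpha = 1, beta = 1 -> dmv1_kernel: y[i] += D[i] * x[i]  (y becomes 12)
    nvgraph::dmv<float>(n, 1.0f, D.data().get(), x.data().get(),
                        1.0f, y.data().get(), 0);

    // Any other alpha/beta combination hits the FatalError branch above.
}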
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/pagerank_kernels.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <thrust/device_vector.h> #include "nvgraph_error.hxx" #include "nvgraph_vector_kernels.hxx" #include "pagerank_kernels.hxx" namespace nvgraph { template <typename ValueType_> __global__ void update_dn_kernel(int num_vertices, ValueType_* aa, ValueType_ beta) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int r = tidx; r < num_vertices; r += blockDim.x * gridDim.x) { // NOTE 1 : a = alpha*a + (1-alpha)e if (aa[r] == 0.0) aa[r] = beta; // NOTE 2 : alpha*0 + (1-alpha)*1 = (1-alpha) } } template <typename ValueType_> void update_dangling_nodes(int num_vertices, ValueType_* dangling_nodes, ValueType_ damping_factor, cudaStream_t stream) { int num_threads = 256; int max_grid_size = 4096; int num_blocks = std::min(max_grid_size, (num_vertices/num_threads)+1); ValueType_ beta = 1.0-damping_factor; update_dn_kernel<<<num_blocks, num_threads, 0, stream>>>(num_vertices, dangling_nodes,beta); cudaCheckError(); } //Explicit template void update_dangling_nodes<double> (int num_vertices, double* dangling_nodes, double damping_factor, cudaStream_t stream); template void update_dangling_nodes<float> (int num_vertices, float* dangling_nodes, float damping_factor, cudaStream_t stream); } // end namespace nvgraph
0
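For comparison only, the same "replace zero entries with 1 - damping_factor" operation expressed with Thrust primitives; this is an illustrative alternative, not part of nvgraph, and it assumes the dangling-nodes vector already lives in device memory.

// Sketch: equivalent of update_dangling_nodes using thrust::replace_if (compile as .cu).
#include <thrust/replace.h>
#include <thrust/device_ptr.h>
#include <thrust/system/cuda/execution_policy.h>

struct is_zero_value
{
    __host__ __device__ bool operator()(float v) const { return v == 0.0f; }
};

void update_dangling_nodes_thrust(int num_vertices, float* dangling_nodes,
                                  float damping_factor, cudaStream_t stream)
{
    float beta = 1.0f - damping_factor;
    thrust::device_ptr<float> p(dangling_nodes);
    // Wherever the entry is zero (a dangling node), write (1 - alpha), matching NOTE 2 above.
    thrust::replace_if(thrust::cuda::par.on(stream), p, p + num_vertices,
                       is_zero_value(), beta);
}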
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/bfs_kernels.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <sm_utils.h> #include <cub/cub.cuh> #include <nvgraph_error.hxx> #define MAXBLOCKS 65535 #define WARP_SIZE 32 #define INT_SIZE 32 // // Bottom up macros // #define FILL_UNVISITED_QUEUE_DIMX 256 #define COUNT_UNVISITED_EDGES_DIMX 256 #define MAIN_BOTTOMUP_DIMX 256 #define MAIN_BOTTOMUP_NWARPS (MAIN_BOTTOMUP_DIMX/WARP_SIZE) #define LARGE_BOTTOMUP_DIMX 256 //Number of edges processed in the main bottom up kernel #define MAIN_BOTTOMUP_MAX_EDGES 6 //Power of 2 < 32 (strict <) #define BOTTOM_UP_LOGICAL_WARP_SIZE 4 // // Top down macros // // We will precompute the results the binsearch_maxle every TOP_DOWN_BUCKET_SIZE edges #define TOP_DOWN_BUCKET_SIZE 32 // DimX of the kernel #define TOP_DOWN_EXPAND_DIMX 256 // TOP_DOWN_EXPAND_DIMX edges -> NBUCKETS_PER_BLOCK buckets #define NBUCKETS_PER_BLOCK (TOP_DOWN_EXPAND_DIMX/TOP_DOWN_BUCKET_SIZE) // How many items_per_thread we can process with one bucket_offset loading // the -1 is here because we need the +1 offset #define MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD (TOP_DOWN_BUCKET_SIZE - 1) // instruction parallelism // for how many edges will we create instruction parallelism #define TOP_DOWN_BATCH_SIZE 2 #define COMPUTE_BUCKET_OFFSETS_DIMX 512 //Other macros #define FLAG_ISOLATED_VERTICES_DIMX 128 //Number of vertices handled by one thread //Must be power of 2, lower than 32 #define FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD 4 //Number of threads involved in the "construction" of one int in the bitset #define FLAG_ISOLATED_VERTICES_THREADS_PER_INT (INT_SIZE/FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD) // // Parameters of the heuristic to switch between bottomup/topdown //Finite machine described in http://parlab.eecs.berkeley.edu/sites/all/parlab/files/main.pdf // using namespace nvgraph; namespace bfs_kernels { // // gives the equivalent vectors from a type // for the max val, would be better to use numeric_limits<>::max() once // cpp11 is allowed in nvgraph // template<typename > struct vec_t { typedef int4 vec4; typedef int2 vec2; }; template<> struct vec_t<int> { typedef int4 vec4; typedef int2 vec2; static const int max = INT_MAX; }; template<> struct vec_t<long long int> { typedef longlong4 vec4; typedef longlong2 vec2; static const long long int max = LLONG_MAX; }; // // ------------------------- Helper device functions ------------------- // __forceinline__ __device__ int getMaskNRightmostBitSet(int n) { if (n == INT_SIZE) return (~0); int mask = (1 << n) - 1; return mask; } __forceinline__ __device__ int getMaskNLeftmostBitSet(int n) { if (n == 0) return 0; int mask = ~((1 << (INT_SIZE - n)) - 1); return mask; } __forceinline__ __device__ int getNextZeroBit(int& val) { int ibit = __ffs(~val) - 1; val |= (1 << ibit); return ibit; } struct BitwiseAnd { template<typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return (a & b); } }; struct BitwiseOr { template<typename T> __host__ __device__ 
__forceinline__ T operator()(const T &a, const T &b) const { return (a | b); } }; template<typename IndexType> __device__ IndexType binsearch_maxle( const IndexType *vec, const IndexType val, IndexType low, IndexType high) { while (true) { if (low == high) return low; //we know it exists if ((low + 1) == high) return (vec[high] <= val) ? high : low; IndexType mid = low + (high - low) / 2; if (vec[mid] > val) high = mid - 1; else low = mid; } } // // ------------------------- Bottom up ------------------------- // // // fill_unvisited_queue_kernel // // Finding unvisited vertices in the visited_bmap, and putting them in the queue // Vertices represented by the same int in the bitmap are adjacent in the queue, and sorted // For instance, the queue can look like this : // 34 38 45 58 61 4 18 24 29 71 84 85 90 // Because they are represented by those ints in the bitmap : // [34 38 45 58 61] [4 18 24 29] [71 84 85 90] //visited_bmap_nints = the visited_bmap is made of that number of ints template<typename IndexType> __global__ void fill_unvisited_queue_kernel( int *visited_bmap, IndexType visited_bmap_nints, IndexType n, IndexType *unvisited, IndexType *unvisited_cnt) { typedef cub::BlockScan<int, FILL_UNVISITED_QUEUE_DIMX> BlockScan; __shared__ typename BlockScan::TempStorage scan_temp_storage; //When filling the "unvisited" queue, we use "unvisited_cnt" to know where to write in the queue (equivalent of int off = atomicAddd(unvisited_cnt, 1) ) //We will actually do only one atomicAdd per block - we first do a scan, then call one atomicAdd, and store the common offset for the block in //unvisited_common_block_offset __shared__ IndexType unvisited_common_block_offset; //We don't want threads divergence in the loop (we're going to call __syncthreads) //Using a block-only dependent in the condition of the loop for (IndexType block_v_idx = blockIdx.x * blockDim.x; block_v_idx < visited_bmap_nints; block_v_idx += blockDim.x * gridDim.x) { //Index of visited_bmap that this thread will compute IndexType v_idx = block_v_idx + threadIdx.x; int thread_visited_int = (v_idx < visited_bmap_nints) ? 
visited_bmap[v_idx] : (~0); //will be neutral in the next lines (virtual vertices all visited) //The last int can only be partially valid //If we are indeed taking care of the last visited int in this thread, //We need to first disable (ie set as "visited") the inactive bits (vertices >= n) if (v_idx == (visited_bmap_nints - 1)) { int active_bits = n - (INT_SIZE * v_idx); int inactive_bits = INT_SIZE - active_bits; int mask = getMaskNLeftmostBitSet(inactive_bits); thread_visited_int |= mask; //Setting inactive bits as visited } //Counting number of unvisited vertices represented by this int int n_unvisited_in_int = __popc(~thread_visited_int); int unvisited_thread_offset; //We will need to write n_unvisited_in_int unvisited vertices to the unvisited queue //We ask for that space when computing the block scan, that will tell where to write those //vertices in the queue, using the common offset of the block (see below) BlockScan(scan_temp_storage).ExclusiveSum(n_unvisited_in_int, unvisited_thread_offset); //Last thread knows how many vertices will be written to the queue by this block //Asking for that space in the queue using the global count, and saving the common offset if (threadIdx.x == (FILL_UNVISITED_QUEUE_DIMX - 1)) { IndexType total = unvisited_thread_offset + n_unvisited_in_int; unvisited_common_block_offset = atomicAdd(unvisited_cnt, total); } //syncthreads for two reasons : // - we need to broadcast unvisited_common_block_offset // - we will reuse scan_temp_storage (cf CUB doc) __syncthreads(); IndexType current_unvisited_index = unvisited_common_block_offset + unvisited_thread_offset; int nvertices_to_write = n_unvisited_in_int; // getNextZeroBit uses __ffs, which gives least significant bit set // which means that as long as n_unvisited_in_int is valid, // we will use valid bits while (nvertices_to_write > 0) { if (nvertices_to_write >= 4 && (current_unvisited_index % 4) == 0) { typename vec_t<IndexType>::vec4 vec_v; vec_v.x = v_idx * INT_SIZE + getNextZeroBit(thread_visited_int); vec_v.y = v_idx * INT_SIZE + getNextZeroBit(thread_visited_int); vec_v.z = v_idx * INT_SIZE + getNextZeroBit(thread_visited_int); vec_v.w = v_idx * INT_SIZE + getNextZeroBit(thread_visited_int); typename vec_t<IndexType>::vec4 *unvisited_i4 = reinterpret_cast<typename vec_t< IndexType>::vec4*>(&unvisited[current_unvisited_index]); *unvisited_i4 = vec_v; current_unvisited_index += 4; nvertices_to_write -= 4; } else if (nvertices_to_write >= 2 && (current_unvisited_index % 2) == 0) { typename vec_t<IndexType>::vec2 vec_v; vec_v.x = v_idx * INT_SIZE + getNextZeroBit(thread_visited_int); vec_v.y = v_idx * INT_SIZE + getNextZeroBit(thread_visited_int); typename vec_t<IndexType>::vec2 *unvisited_i2 = reinterpret_cast<typename vec_t< IndexType>::vec2*>(&unvisited[current_unvisited_index]); *unvisited_i2 = vec_v; current_unvisited_index += 2; nvertices_to_write -= 2; } else { IndexType v = v_idx * INT_SIZE + getNextZeroBit(thread_visited_int); unvisited[current_unvisited_index] = v; current_unvisited_index += 1; nvertices_to_write -= 1; } } } } //Wrapper template<typename IndexType> void fill_unvisited_queue( int *visited_bmap, IndexType visited_bmap_nints, IndexType n, IndexType *unvisited, IndexType *unvisited_cnt, cudaStream_t m_stream, bool deterministic) { dim3 grid, block; block.x = FILL_UNVISITED_QUEUE_DIMX; grid.x = min((IndexType) MAXBLOCKS, (visited_bmap_nints + block.x - 1) / block.x); fill_unvisited_queue_kernel<<<grid, block, 0, m_stream>>>( visited_bmap, visited_bmap_nints, n, unvisited, 
unvisited_cnt); cudaCheckError() ; } // // count_unvisited_edges_kernel // Couting the total number of unvisited edges in the graph - using an potentially unvisited queue // We need the current unvisited vertices to be in the unvisited queue // But visited vertices can be in the potentially_unvisited queue // We first check if the vertex is still unvisited before using it // Useful when switching from "Bottom up" to "Top down" // template<typename IndexType> __global__ void count_unvisited_edges_kernel(const IndexType *potentially_unvisited, const IndexType potentially_unvisited_size, const int *visited_bmap, IndexType *degree_vertices, IndexType *mu) { typedef cub::BlockReduce<IndexType, COUNT_UNVISITED_EDGES_DIMX> BlockReduce; __shared__ typename BlockReduce::TempStorage reduce_temp_storage; //number of undiscovered edges counted by this thread IndexType thread_unvisited_edges_count = 0; for (IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; idx < potentially_unvisited_size; idx += blockDim.x * gridDim.x) { IndexType u = potentially_unvisited[idx]; int u_visited_bmap = visited_bmap[u / INT_SIZE]; int is_visited = u_visited_bmap & (1 << (u % INT_SIZE)); if (!is_visited) thread_unvisited_edges_count += degree_vertices[u]; } //We need all thread_unvisited_edges_count to be ready before reducing __syncthreads(); IndexType block_unvisited_edges_count = BlockReduce(reduce_temp_storage).Sum(thread_unvisited_edges_count); //block_unvisited_edges_count is only defined is th.x == 0 if (threadIdx.x == 0) atomicAdd(mu, block_unvisited_edges_count); } //Wrapper template<typename IndexType> void count_unvisited_edges(const IndexType *potentially_unvisited, const IndexType potentially_unvisited_size, const int *visited_bmap, IndexType *node_degree, IndexType *mu, cudaStream_t m_stream) { dim3 grid, block; block.x = COUNT_UNVISITED_EDGES_DIMX; grid.x = min((IndexType) MAXBLOCKS, (potentially_unvisited_size + block.x - 1) / block.x); count_unvisited_edges_kernel<<<grid, block, 0, m_stream>>>( potentially_unvisited, potentially_unvisited_size, visited_bmap, node_degree, mu); cudaCheckError() ; } // // Main Bottom Up kernel // Here we will start to process unvisited vertices in the unvisited queue // We will only consider the first MAIN_BOTTOMUP_MAX_EDGES edges // If it's not possible to define a valid parent using only those edges, // add it to the "left_unvisited_queue" // // // We will use the "vertices represented by the same int in the visited bmap are adjacents and sorted in the unvisited queue" property // It is used to do a reduction locally and fully build the new visited_bmap // template<typename IndexType> __global__ void main_bottomup_kernel( const IndexType *unvisited, const IndexType unvisited_size, IndexType *left_unvisited, IndexType *left_unvisited_cnt, int *visited_bmap, const IndexType *row_ptr, const IndexType *col_ind, IndexType lvl, IndexType *new_frontier, IndexType *new_frontier_cnt, IndexType *distances, IndexType *predecessors, int *edge_mask) { typedef cub::BlockDiscontinuity<IndexType, MAIN_BOTTOMUP_DIMX> BlockDiscontinuity; typedef cub::WarpReduce<int> WarpReduce; typedef cub::BlockScan<int, MAIN_BOTTOMUP_DIMX> BlockScan; __shared__ typename BlockDiscontinuity::TempStorage discontinuity_temp_storage; __shared__ typename WarpReduce::TempStorage reduce_temp_storage; __shared__ typename BlockScan::TempStorage scan_temp_storage; //To write vertices in the frontier, //We will use a block scan to locally compute the offsets //frontier_common_block_offset contains the common 
offset for the block __shared__ IndexType frontier_common_block_offset; // When building the new visited_bmap, we reduce (using a bitwise and) the visited_bmap ints // from the vertices represented by the same int (for instance vertices 1, 5, 9, 13, 23) // vertices represented by the same int will be designed as part of the same "group" // To detect the deliminations between those groups, we use BlockDiscontinuity // Then we need to create the new "visited_bmap" within those group. // We use a warp reduction that takes into account limits between groups to do it // But a group can be cut in two different warps : in that case, the second warp // put the result of its local reduction in local_visited_bmap_warp_head // the first warp will then read it and finish the reduction __shared__ int local_visited_bmap_warp_head[MAIN_BOTTOMUP_NWARPS]; const int warpid = threadIdx.x / WARP_SIZE; const int laneid = threadIdx.x % WARP_SIZE; // we will call __syncthreads inside the loop // we need to keep complete block active for (IndexType block_off = blockIdx.x * blockDim.x; block_off < unvisited_size; block_off += blockDim.x * gridDim.x) { IndexType idx = block_off + threadIdx.x; // This thread will take care of unvisited_vertex // in the visited_bmap, it is represented by the int at index // visited_bmap_index = unvisited_vertex/INT_SIZE // it will be used by BlockDiscontinuity // to flag the separation between groups of vertices (vertices represented by different in in visited_bmap) IndexType visited_bmap_index[1]; //this is an array of size 1 because CUB needs one visited_bmap_index[0] = -1; IndexType unvisited_vertex = -1; // local_visited_bmap gives info on the visited bit of unvisited_vertex // // By default, everything is visited // This is because we only take care of unvisited vertices here, // The other are by default unvisited // If a vertex remain unvisited, we will notice it here // That's why by default we consider everything visited ( ie ~0 ) // If we fail to assign one parent to an unvisited vertex, we will // explicitly unset the bit int local_visited_bmap = (~0); int found = 0; int more_to_visit = 0; IndexType valid_parent; IndexType left_unvisited_off; if (idx < unvisited_size) { //Processing first STPV edges of unvisited v //If bigger than that, push to left_unvisited queue unvisited_vertex = unvisited[idx]; IndexType edge_begin = row_ptr[unvisited_vertex]; IndexType edge_end = row_ptr[unvisited_vertex + 1]; visited_bmap_index[0] = unvisited_vertex / INT_SIZE; IndexType degree = edge_end - edge_begin; for (IndexType edge = edge_begin; edge < min(edge_end, edge_begin + MAIN_BOTTOMUP_MAX_EDGES); ++edge) { if (edge_mask && !edge_mask[edge]) continue; IndexType parent_candidate = col_ind[edge]; if (distances[parent_candidate] == (lvl - 1)) { found = 1; valid_parent = parent_candidate; break; } } // This vertex will remain unvisited at the end of this kernel // Explicitly say it if (!found) local_visited_bmap &= ~(1 << (unvisited_vertex % INT_SIZE)); //let this one unvisited else { if (distances) distances[unvisited_vertex] = lvl; if (predecessors) predecessors[unvisited_vertex] = valid_parent; } //If we haven't found a parent and there's more edge to check if (!found && degree > MAIN_BOTTOMUP_MAX_EDGES) { left_unvisited_off = atomicAdd(left_unvisited_cnt, (IndexType) 1); //TODO scan more_to_visit = 1; } } // // We will separate vertices in group // Two vertices are in the same group if represented by same int in visited_bmap // ie u and v in same group <=> u/32 == v/32 // // We will now 
flag the head of those group (first element of each group) // // 1) All vertices within the same group are adjacent in the queue (cf fill_unvisited_queue) // 2) A group is of size <= 32, so a warp will contain at least one head, and a group will be contained // at most by two warps int is_head_a[1]; //CUB need an array BlockDiscontinuity(discontinuity_temp_storage).FlagHeads(is_head_a, visited_bmap_index, cub::Inequality()); int is_head = is_head_a[0]; // Computing the warp reduce within group // This primitive uses the is_head flags to know where the limits of the groups are // We use bitwise and as operator, because of the fact that 1 is the default value // If a vertex is unvisited, we have to explicitly ask for it int local_bmap_agg = WarpReduce(reduce_temp_storage).HeadSegmentedReduce( local_visited_bmap, is_head, BitwiseAnd()); // We need to take care of the groups cut in two in two different warps // Saving second part of the reduce here, then applying it on the first part bellow // Corner case : if the first thread of the warp is a head, then this group is not cut in two // and then we have to be neutral (for an bitwise and, it's an ~0) if (laneid == 0) { local_visited_bmap_warp_head[warpid] = (is_head) ? (~0) : local_bmap_agg; } //broadcasting local_visited_bmap_warp_head __syncthreads(); int head_ballot = nvgraph::utils::ballot(is_head); //As long as idx < unvisited_size, we know there's at least one head per warp int laneid_last_head_in_warp = INT_SIZE - 1 - __clz(head_ballot); int is_last_head_in_warp = (laneid == laneid_last_head_in_warp); // if laneid == 0 && is_last_head_in_warp, it's a special case where // a group of size 32 starts exactly at lane 0 // in that case, nothing to do (this group is not cut by a warp delimitation) // we also have to make sure that a warp actually exists after this one (this corner case is handled after) if (laneid != 0 && is_last_head_in_warp & (warpid + 1) < MAIN_BOTTOMUP_NWARPS) { local_bmap_agg &= local_visited_bmap_warp_head[warpid + 1]; } //Three cases : // -> This is the first group of the block - it may be cut in two (with previous block) // -> This is the last group of the block - same thing // -> This group is completely contained in this block if (warpid == 0 && laneid == 0) { //The first elt of this group considered in this block is unvisited_vertex //We know that's the case because elts are sorted in a group, and we are at laneid == 0 //We will do an atomicOr - we have to be neutral about elts < unvisited_vertex int iv = unvisited_vertex % INT_SIZE; // we know that this unvisited_vertex is valid int mask = getMaskNLeftmostBitSet(INT_SIZE - iv); local_bmap_agg &= mask; //we have to be neutral for elts < unvisited_vertex atomicOr(&visited_bmap[unvisited_vertex / INT_SIZE], local_bmap_agg); } else if (warpid == (MAIN_BOTTOMUP_NWARPS - 1) && laneid >= laneid_last_head_in_warp && // We need the other ones to go in else case idx < unvisited_size //we could be out ) { //Last head of the block //We don't know if this group is complete //last_v is the last unvisited_vertex of the group IN THIS block //we dont know about the rest - we have to be neutral about elts > last_v //the destination thread of the __shfl is active int laneid_max = min((IndexType) (WARP_SIZE - 1), (unvisited_size - (block_off + 32 * warpid))); IndexType last_v = nvgraph::utils::shfl( unvisited_vertex, laneid_max, WARP_SIZE, __activemask()); if (is_last_head_in_warp) { int ilast_v = last_v % INT_SIZE + 1; int mask = getMaskNRightmostBitSet(ilast_v); local_bmap_agg &= 
mask; //we have to be neutral for elts > last_unvisited_vertex atomicOr(&visited_bmap[unvisited_vertex / INT_SIZE], local_bmap_agg); } } else { //group completely in block if (is_head && idx < unvisited_size) { visited_bmap[unvisited_vertex / INT_SIZE] = local_bmap_agg; //no atomics needed, we know everything about this int } } //Saving in frontier int thread_frontier_offset; BlockScan(scan_temp_storage).ExclusiveSum(found, thread_frontier_offset); IndexType inclusive_sum = thread_frontier_offset + found; if (threadIdx.x == (MAIN_BOTTOMUP_DIMX - 1) && inclusive_sum) { frontier_common_block_offset = atomicAdd(new_frontier_cnt, inclusive_sum); } //1) Broadcasting frontier_common_block_offset //2) we want to reuse the *_temp_storage __syncthreads(); if (found) new_frontier[frontier_common_block_offset + thread_frontier_offset] = unvisited_vertex; if (more_to_visit) left_unvisited[left_unvisited_off] = unvisited_vertex; } } template<typename IndexType> void bottom_up_main( IndexType *unvisited, IndexType unvisited_size, IndexType *left_unvisited, IndexType *d_left_unvisited_idx, int *visited, const IndexType *row_ptr, const IndexType *col_ind, IndexType lvl, IndexType *new_frontier, IndexType *new_frontier_idx, IndexType *distances, IndexType *predecessors, int *edge_mask, cudaStream_t m_stream, bool deterministic) { dim3 grid, block; block.x = MAIN_BOTTOMUP_DIMX; grid.x = min((IndexType) MAXBLOCKS, ((unvisited_size + block.x - 1)) / block.x); main_bottomup_kernel<<<grid, block, 0, m_stream>>>(unvisited, unvisited_size, left_unvisited, d_left_unvisited_idx, visited, row_ptr, col_ind, lvl, new_frontier, new_frontier_idx, distances, predecessors, edge_mask); cudaCheckError() ; } // // bottom_up_large_degree_kernel // finishing the work started in main_bottomup_kernel for vertex with degree > MAIN_BOTTOMUP_MAX_EDGES && no parent found // template<typename IndexType> __global__ void bottom_up_large_degree_kernel( IndexType *left_unvisited, IndexType left_unvisited_size, int *visited, const IndexType *row_ptr, const IndexType *col_ind, IndexType lvl, IndexType *new_frontier, IndexType *new_frontier_cnt, IndexType *distances, IndexType *predecessors, int *edge_mask) { int logical_lane_id = threadIdx.x % BOTTOM_UP_LOGICAL_WARP_SIZE; int logical_warp_id = threadIdx.x / BOTTOM_UP_LOGICAL_WARP_SIZE; int logical_warps_per_block = blockDim.x / BOTTOM_UP_LOGICAL_WARP_SIZE; //Inactive threads are not a pb for __ballot (known behaviour) for (IndexType idx = logical_warps_per_block * blockIdx.x + logical_warp_id; idx < left_unvisited_size; idx += gridDim.x * logical_warps_per_block) { //Unvisited vertices - potentially in the next frontier IndexType v = left_unvisited[idx]; //Used only with symmetric graphs //Parents are included in v's neighbors IndexType first_i_edge = row_ptr[v] + MAIN_BOTTOMUP_MAX_EDGES; //we already have checked the first MAIN_BOTTOMUP_MAX_EDGES edges in find_unvisited IndexType end_i_edge = row_ptr[v + 1]; //We can have warp divergence in the next loop //It's not a pb because the behaviour of __ballot //is know with inactive threads for (IndexType i_edge = first_i_edge + logical_lane_id; i_edge < end_i_edge; i_edge += BOTTOM_UP_LOGICAL_WARP_SIZE) { IndexType valid_parent = -1; if (!edge_mask || edge_mask[i_edge]) { IndexType u = col_ind[i_edge]; IndexType lvl_u = distances[u]; if (lvl_u == (lvl - 1)) { valid_parent = u; } } unsigned int warp_valid_p_ballot = nvgraph::utils::ballot((valid_parent != -1)); int logical_warp_id_in_warp = (threadIdx.x % WARP_SIZE) / 
BOTTOM_UP_LOGICAL_WARP_SIZE; unsigned int mask = (1 << BOTTOM_UP_LOGICAL_WARP_SIZE) - 1; unsigned int logical_warp_valid_p_ballot = warp_valid_p_ballot >> (BOTTOM_UP_LOGICAL_WARP_SIZE * logical_warp_id_in_warp); logical_warp_valid_p_ballot &= mask; int chosen_thread = __ffs(logical_warp_valid_p_ballot) - 1; if (chosen_thread == logical_lane_id) { //Using only one valid parent (reduce bw) IndexType off = atomicAdd(new_frontier_cnt, (IndexType) 1); int m = 1 << (v % INT_SIZE); atomicOr(&visited[v / INT_SIZE], m); distances[v] = lvl; if (predecessors) predecessors[v] = valid_parent; new_frontier[off] = v; } if (logical_warp_valid_p_ballot) { break; } } } } template<typename IndexType> void bottom_up_large(IndexType *left_unvisited, IndexType left_unvisited_size, int *visited, const IndexType *row_ptr, const IndexType *col_ind, IndexType lvl, IndexType *new_frontier, IndexType *new_frontier_idx, IndexType *distances, IndexType *predecessors, int *edge_mask, cudaStream_t m_stream, bool deterministic) { dim3 grid, block; block.x = LARGE_BOTTOMUP_DIMX; grid.x = min( (IndexType) MAXBLOCKS, ((left_unvisited_size + block.x - 1) * BOTTOM_UP_LOGICAL_WARP_SIZE) / block.x); bottom_up_large_degree_kernel<<<grid, block, 0, m_stream>>>(left_unvisited, left_unvisited_size, visited, row_ptr, col_ind, lvl, new_frontier, new_frontier_idx, distances, predecessors, edge_mask); cudaCheckError() ; } // // // ------------------------------ Top down ------------------------------ // // // // compute_bucket_offsets_kernel // simply compute the position in the frontier corresponding all valid edges with index=TOP_DOWN_BUCKET_SIZE * k, k integer // template<typename IndexType> __global__ void compute_bucket_offsets_kernel( const IndexType *frontier_degrees_exclusive_sum, IndexType *bucket_offsets, const IndexType frontier_size, IndexType total_degree) { IndexType end = ((total_degree - 1 + TOP_DOWN_EXPAND_DIMX) / TOP_DOWN_EXPAND_DIMX * NBUCKETS_PER_BLOCK + 1); for (IndexType bid = blockIdx.x * blockDim.x + threadIdx.x; bid <= end; bid += gridDim.x * blockDim.x) { IndexType eid = min(bid * TOP_DOWN_BUCKET_SIZE, total_degree - 1); bucket_offsets[bid] = binsearch_maxle( frontier_degrees_exclusive_sum, eid, (IndexType) 0, frontier_size - 1); } } template<typename IndexType> void compute_bucket_offsets( IndexType *cumul, IndexType *bucket_offsets, IndexType frontier_size, IndexType total_degree, cudaStream_t m_stream) { dim3 grid, block; block.x = COMPUTE_BUCKET_OFFSETS_DIMX; grid.x = min( (IndexType) MAXBLOCKS, ((total_degree - 1 + TOP_DOWN_EXPAND_DIMX) / TOP_DOWN_EXPAND_DIMX * NBUCKETS_PER_BLOCK + 1 + block.x - 1) / block.x); compute_bucket_offsets_kernel<<<grid, block, 0, m_stream>>>(cumul, bucket_offsets, frontier_size, total_degree); cudaCheckError() ; } // // topdown_expand_kernel // Read current frontier and compute new one with top down paradigm // One thread = One edge // To know origin of edge, we have to find where is index_edge in the values of frontier_degrees_exclusive_sum (using a binary search, max less or equal than) // This index k will give us the origin of this edge, which is frontier[k] // This thread will then process the (linear_idx_thread - frontier_degrees_exclusive_sum[k])-ith edge of vertex frontier[k] // // To process blockDim.x = TOP_DOWN_EXPAND_DIMX edges, we need to first load NBUCKETS_PER_BLOCK bucket offsets - those will help us do the binary searches // We can load up to TOP_DOWN_EXPAND_DIMX of those bucket offsets - that way we prepare for the next MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD * 
blockDim.x edges // // Once we have those offsets, we may still need a few values from frontier_degrees_exclusive_sum to compute exact index k // To be able to do it, we will load the values that we need from frontier_degrees_exclusive_sum in shared memory // We know that it will fit because we never add node with degree == 0 in the frontier, so we have an upper bound on the number of value to load (see below) // // We will then look which vertices are not visited yet : // 1) if the unvisited vertex is isolated (=> degree == 0), we mark it as visited, update distances and predecessors, and move on // 2) if the unvisited vertex has degree > 0, we add it to the "frontier_candidates" queue // // We then treat the candidates queue using the threadIdx.x < ncandidates // If we are indeed the first thread to discover that vertex (result of atomicOr(visited)) // We add it to the new frontier // template<typename IndexType> __global__ void topdown_expand_kernel( const IndexType *row_ptr, const IndexType *col_ind, const IndexType *frontier, const IndexType frontier_size, const IndexType totaldegree, const IndexType max_items_per_thread, const IndexType lvl, IndexType *new_frontier, IndexType *new_frontier_cnt, const IndexType *frontier_degrees_exclusive_sum, const IndexType *frontier_degrees_exclusive_sum_buckets_offsets, int *bmap, IndexType *distances, IndexType *predecessors, const int *edge_mask, const int *isolated_bmap, bool directed) { //BlockScan typedef cub::BlockScan<IndexType, TOP_DOWN_EXPAND_DIMX> BlockScan; __shared__ typename BlockScan::TempStorage scan_storage; // We will do a scan to know where to write in frontier // This will contain the common offset of the block __shared__ IndexType frontier_common_block_offset; __shared__ IndexType shared_buckets_offsets[TOP_DOWN_EXPAND_DIMX - NBUCKETS_PER_BLOCK + 1]; __shared__ IndexType shared_frontier_degrees_exclusive_sum[TOP_DOWN_EXPAND_DIMX + 1]; // // Frontier candidates local queue // We process TOP_DOWN_BATCH_SIZE vertices in parallel, so we need to be able to store everything // We also save the predecessors here, because we will not be able to retrieve it after // __shared__ IndexType shared_local_new_frontier_candidates[TOP_DOWN_BATCH_SIZE * TOP_DOWN_EXPAND_DIMX]; __shared__ IndexType shared_local_new_frontier_predecessors[TOP_DOWN_BATCH_SIZE * TOP_DOWN_EXPAND_DIMX]; __shared__ IndexType block_n_frontier_candidates; IndexType block_offset = (blockDim.x * blockIdx.x) * max_items_per_thread; IndexType n_items_per_thread_left = (totaldegree - block_offset + TOP_DOWN_EXPAND_DIMX - 1) / TOP_DOWN_EXPAND_DIMX; n_items_per_thread_left = min(max_items_per_thread, n_items_per_thread_left); for (; (n_items_per_thread_left > 0) && (block_offset < totaldegree); block_offset += MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD * blockDim.x, n_items_per_thread_left -= MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD) { // In this loop, we will process batch_set_size batches IndexType nitems_per_thread = min( n_items_per_thread_left, (IndexType) MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD); // Loading buckets offset (see compute_bucket_offsets_kernel) if (threadIdx.x < (nitems_per_thread * NBUCKETS_PER_BLOCK + 1)) shared_buckets_offsets[threadIdx.x] = frontier_degrees_exclusive_sum_buckets_offsets[block_offset / TOP_DOWN_BUCKET_SIZE + threadIdx.x]; // We will use shared_buckets_offsets __syncthreads(); // // shared_buckets_offsets gives us a range of the possible indexes // for edge of linear_threadx, we are looking for the value k such as // k is the max value such as 
frontier_degrees_exclusive_sum[k] <= linear_threadx // // we have 0 <= k < frontier_size // but we also have : // // frontier_degrees_exclusive_sum_buckets_offsets[linear_threadx/TOP_DOWN_BUCKET_SIZE] // <= k // <= frontier_degrees_exclusive_sum_buckets_offsets[linear_threadx/TOP_DOWN_BUCKET_SIZE + 1] // // To find the exact value in that range, we need a few values from frontier_degrees_exclusive_sum (see below) // We will load them here // We will load as much as we can - if it doesn't fit we will make multiple iteration of the next loop // Because all vertices in frontier have degree > 0, we know it will fits if left + 1 = right (see below) //We're going to load values in frontier_degrees_exclusive_sum for batch [left; right[ //If it doesn't fit, --right until it does, then loop //It is excepted to fit on the first try, that's why we start right = nitems_per_thread IndexType left = 0; IndexType right = nitems_per_thread; while (left < nitems_per_thread) { // // Values that are necessary to compute the local binary searches // We only need those with indexes between extremes indexes of buckets_offsets // We need the next val for the binary search, hence the +1 // IndexType nvalues_to_load = shared_buckets_offsets[right * NBUCKETS_PER_BLOCK] - shared_buckets_offsets[left * NBUCKETS_PER_BLOCK] + 1; //If left = right + 1 we are sure to have nvalues_to_load < TOP_DOWN_EXPAND_DIMX+1 while (nvalues_to_load > (TOP_DOWN_EXPAND_DIMX + 1)) { --right; nvalues_to_load = shared_buckets_offsets[right * NBUCKETS_PER_BLOCK] - shared_buckets_offsets[left * NBUCKETS_PER_BLOCK] + 1; } IndexType nitems_per_thread_for_this_load = right - left; IndexType frontier_degrees_exclusive_sum_block_offset = shared_buckets_offsets[left * NBUCKETS_PER_BLOCK]; //TODO put again the nvalues_to_load == 1 if (threadIdx.x < nvalues_to_load) { shared_frontier_degrees_exclusive_sum[threadIdx.x] = frontier_degrees_exclusive_sum[frontier_degrees_exclusive_sum_block_offset + threadIdx.x]; } if (nvalues_to_load == (TOP_DOWN_EXPAND_DIMX + 1) && threadIdx.x == 0) { shared_frontier_degrees_exclusive_sum[TOP_DOWN_EXPAND_DIMX] = frontier_degrees_exclusive_sum[frontier_degrees_exclusive_sum_block_offset + TOP_DOWN_EXPAND_DIMX]; } //shared_frontier_degrees_exclusive_sum is in shared mem, we will use it, sync //TODO we don't use it if nvalues_to_load == 1 __syncthreads(); // Now we will process the edges // Here each thread will process nitems_per_thread_for_this_load for (IndexType item_index = 0; item_index < nitems_per_thread_for_this_load; item_index += TOP_DOWN_BATCH_SIZE) { // We process TOP_DOWN_BATCH_SIZE edge in parallel (instruction parallism) // Reduces latency IndexType current_max_edge_index = min(block_offset + (left + nitems_per_thread_for_this_load) * blockDim.x, totaldegree); //We will need vec_u (source of the edge) until the end if we need to save the predecessors //For others informations, we will reuse pointers on the go (nvcc does not color well the registers in that case) IndexType vec_u[TOP_DOWN_BATCH_SIZE]; IndexType local_buf1[TOP_DOWN_BATCH_SIZE]; IndexType local_buf2[TOP_DOWN_BATCH_SIZE]; IndexType *vec_frontier_degrees_exclusive_sum_index = &local_buf2[0]; #pragma unroll for (IndexType iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType ibatch = left + item_index + iv; IndexType gid = block_offset + ibatch * blockDim.x + threadIdx.x; if (gid < current_max_edge_index) { IndexType start_off_idx = (ibatch * blockDim.x + threadIdx.x) / TOP_DOWN_BUCKET_SIZE; IndexType bucket_start = 
shared_buckets_offsets[start_off_idx] - frontier_degrees_exclusive_sum_block_offset; IndexType bucket_end = shared_buckets_offsets[start_off_idx + 1] - frontier_degrees_exclusive_sum_block_offset; IndexType k = binsearch_maxle(shared_frontier_degrees_exclusive_sum, gid, bucket_start, bucket_end) + frontier_degrees_exclusive_sum_block_offset; vec_u[iv] = frontier[k]; // origin of this edge vec_frontier_degrees_exclusive_sum_index[iv] = frontier_degrees_exclusive_sum[k]; } else { vec_u[iv] = -1; vec_frontier_degrees_exclusive_sum_index[iv] = -1; } } IndexType *vec_row_ptr_u = &local_buf1[0]; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType u = vec_u[iv]; //row_ptr for this vertex origin u vec_row_ptr_u[iv] = (u != -1) ? row_ptr[u] : -1; } //We won't need row_ptr after that, reusing pointer IndexType *vec_dest_v = vec_row_ptr_u; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType thread_item_index = left + item_index + iv; IndexType gid = block_offset + thread_item_index * blockDim.x + threadIdx.x; IndexType row_ptr_u = vec_row_ptr_u[iv]; IndexType edge = row_ptr_u + gid - vec_frontier_degrees_exclusive_sum_index[iv]; if (edge_mask && !edge_mask[edge]) row_ptr_u = -1; //disabling edge //Destination of this edge vec_dest_v[iv] = (row_ptr_u != -1) ? col_ind[edge] : -1; } //We don't need vec_frontier_degrees_exclusive_sum_index anymore IndexType *vec_v_visited_bmap = vec_frontier_degrees_exclusive_sum_index; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType v = vec_dest_v[iv]; vec_v_visited_bmap[iv] = (v != -1) ? bmap[v / INT_SIZE] : (~0); //will look visited } // From now on we will consider v as a frontier candidate // If for some reason vec_candidate[iv] should be put in the new_frontier // Then set vec_candidate[iv] = -1 IndexType *vec_frontier_candidate = vec_dest_v; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType v = vec_frontier_candidate[iv]; int m = 1 << (v % INT_SIZE); int is_visited = vec_v_visited_bmap[iv] & m; if (is_visited) vec_frontier_candidate[iv] = -1; } if (directed) { //vec_v_visited_bmap is available IndexType *vec_is_isolated_bmap = vec_v_visited_bmap; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType v = vec_frontier_candidate[iv]; vec_is_isolated_bmap[iv] = (v != -1) ? isolated_bmap[v / INT_SIZE] : -1; } #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType v = vec_frontier_candidate[iv]; int m = 1 << (v % INT_SIZE); int is_isolated = vec_is_isolated_bmap[iv] & m; //If v is isolated, we will not add it to the frontier (it's not a frontier candidate) // 1st reason : it's useless // 2nd reason : it will make top down algo fail // we need each node in frontier to have a degree > 0 // If it is isolated, we just need to mark it as visited, and save distance and predecessor here. 
Not need to check return value of atomicOr if (is_isolated && v != -1) { int m = 1 << (v % INT_SIZE); atomicOr(&bmap[v / INT_SIZE], m); if (distances) distances[v] = lvl; if (predecessors) predecessors[v] = vec_u[iv]; //This is no longer a candidate, neutralize it vec_frontier_candidate[iv] = -1; } } } //Number of successor candidate hold by this thread IndexType thread_n_frontier_candidates = 0; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType v = vec_frontier_candidate[iv]; if (v != -1) ++thread_n_frontier_candidates; } // We need to have all nfrontier_candidates to be ready before doing the scan __syncthreads(); // We will put the frontier candidates in a local queue // Computing offsets IndexType thread_frontier_candidate_offset = 0; //offset inside block BlockScan(scan_storage).ExclusiveSum( thread_n_frontier_candidates, thread_frontier_candidate_offset); #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { //May have bank conflicts IndexType frontier_candidate = vec_frontier_candidate[iv]; if (frontier_candidate != -1) { shared_local_new_frontier_candidates[thread_frontier_candidate_offset] = frontier_candidate; shared_local_new_frontier_predecessors[thread_frontier_candidate_offset] = vec_u[iv]; ++thread_frontier_candidate_offset; } } if (threadIdx.x == (TOP_DOWN_EXPAND_DIMX - 1)) { //No need to add nsuccessor_candidate, even if its an //exclusive sum //We incremented the thread_frontier_candidate_offset block_n_frontier_candidates = thread_frontier_candidate_offset; } //broadcast block_n_frontier_candidates __syncthreads(); IndexType naccepted_vertices = 0; //We won't need vec_frontier_candidate after that IndexType *vec_frontier_accepted_vertex = vec_frontier_candidate; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { const int idx_shared = iv * blockDim.x + threadIdx.x; vec_frontier_accepted_vertex[iv] = -1; if (idx_shared < block_n_frontier_candidates) { IndexType v = shared_local_new_frontier_candidates[idx_shared]; //popping queue int m = 1 << (v % INT_SIZE); int q = atomicOr(&bmap[v / INT_SIZE], m); //atomicOr returns old if (!(m & q)) { //if this thread was the first to discover this node if (distances) distances[v] = lvl; if (predecessors) { IndexType pred = shared_local_new_frontier_predecessors[idx_shared]; predecessors[v] = pred; } vec_frontier_accepted_vertex[iv] = v; ++naccepted_vertices; } } } //We need naccepted_vertices to be ready __syncthreads(); IndexType thread_new_frontier_offset; BlockScan(scan_storage).ExclusiveSum(naccepted_vertices, thread_new_frontier_offset); if (threadIdx.x == (TOP_DOWN_EXPAND_DIMX - 1)) { IndexType inclusive_sum = thread_new_frontier_offset + naccepted_vertices; //for this thread, thread_new_frontier_offset + has_successor (exclusive sum) if (inclusive_sum) frontier_common_block_offset = atomicAdd(new_frontier_cnt, inclusive_sum); } //Broadcasting frontier_common_block_offset __syncthreads(); #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { const int idx_shared = iv * blockDim.x + threadIdx.x; if (idx_shared < block_n_frontier_candidates) { IndexType new_frontier_vertex = vec_frontier_accepted_vertex[iv]; if (new_frontier_vertex != -1) { IndexType off = frontier_common_block_offset + thread_new_frontier_offset++; //TODO Access is not good new_frontier[off] = new_frontier_vertex; } } } } //We need to keep shared_frontier_degrees_exclusive_sum coherent __syncthreads(); //Preparing for next load left = right; right = nitems_per_thread; } //we need to keep 
shared_buckets_offsets coherent __syncthreads(); } } template<typename IndexType> void frontier_expand(const IndexType *row_ptr, const IndexType *col_ind, const IndexType *frontier, const IndexType frontier_size, const IndexType totaldegree, const IndexType lvl, IndexType *new_frontier, IndexType *new_frontier_cnt, const IndexType *frontier_degrees_exclusive_sum, const IndexType *frontier_degrees_exclusive_sum_buckets_offsets, int *visited_bmap, IndexType *distances, IndexType *predecessors, const int *edge_mask, const int *isolated_bmap, bool directed, cudaStream_t m_stream, bool deterministic) { if (!totaldegree) return; dim3 block; block.x = TOP_DOWN_EXPAND_DIMX; IndexType max_items_per_thread = (totaldegree + MAXBLOCKS * block.x - 1) / (MAXBLOCKS * block.x); dim3 grid; grid.x = min( (totaldegree + max_items_per_thread * block.x - 1) / (max_items_per_thread * block.x), (IndexType) MAXBLOCKS); topdown_expand_kernel<<<grid, block, 0, m_stream>>>( row_ptr, col_ind, frontier, frontier_size, totaldegree, max_items_per_thread, lvl, new_frontier, new_frontier_cnt, frontier_degrees_exclusive_sum, frontier_degrees_exclusive_sum_buckets_offsets, visited_bmap, distances, predecessors, edge_mask, isolated_bmap, directed); cudaCheckError() ; } template<typename IndexType> __global__ void flag_isolated_vertices_kernel( IndexType n, int *isolated_bmap, const IndexType *row_ptr, IndexType *degrees, IndexType *nisolated) { typedef cub::BlockLoad<IndexType, FLAG_ISOLATED_VERTICES_DIMX, FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad; typedef cub::BlockStore<IndexType, FLAG_ISOLATED_VERTICES_DIMX, FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore; typedef cub::BlockReduce<IndexType, FLAG_ISOLATED_VERTICES_DIMX> BlockReduce; typedef cub::WarpReduce<int, FLAG_ISOLATED_VERTICES_THREADS_PER_INT> WarpReduce; __shared__ typename BlockLoad::TempStorage load_temp_storage; __shared__ typename BlockStore::TempStorage store_temp_storage; __shared__ typename BlockReduce::TempStorage block_reduce_temp_storage; __shared__ typename WarpReduce::TempStorage warp_reduce_temp_storage[FLAG_ISOLATED_VERTICES_DIMX / FLAG_ISOLATED_VERTICES_THREADS_PER_INT]; __shared__ IndexType row_ptr_tail[FLAG_ISOLATED_VERTICES_DIMX]; for (IndexType block_off = FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD * (blockDim.x * blockIdx.x); block_off < n; block_off += FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD * (blockDim.x * gridDim.x)) { IndexType thread_off = block_off + FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD * threadIdx.x; IndexType last_node_thread = thread_off + FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD - 1; IndexType thread_row_ptr[FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD]; IndexType block_valid_items = n - block_off + 1; //+1, we need row_ptr[last_node+1] BlockLoad(load_temp_storage).Load( row_ptr + block_off, thread_row_ptr, block_valid_items, -1); //To compute 4 degrees, we need 5 values of row_ptr //Saving the "5th" value in shared memory for previous thread to use if (threadIdx.x > 0) { row_ptr_tail[threadIdx.x - 1] = thread_row_ptr[0]; } //If this is the last thread, it needs to load its row ptr tail value if (threadIdx.x == (FLAG_ISOLATED_VERTICES_DIMX - 1) && last_node_thread < n) { row_ptr_tail[threadIdx.x] = row_ptr[last_node_thread + 1]; } __syncthreads(); // we may reuse temp_storage int local_isolated_bmap = 0; IndexType imax = (n - thread_off); IndexType local_degree[FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD]; #pragma unroll for (int i = 0; i < 
(FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD - 1); ++i) { IndexType degree = local_degree[i] = thread_row_ptr[i + 1] - thread_row_ptr[i]; if (i < imax) local_isolated_bmap |= ((degree == 0) << i); } if (last_node_thread < n) { IndexType degree = local_degree[FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD - 1] = row_ptr_tail[threadIdx.x] - thread_row_ptr[FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD - 1]; local_isolated_bmap |= ((degree == 0) << (FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD - 1)); } local_isolated_bmap <<= (thread_off % INT_SIZE); IndexType local_nisolated = __popc(local_isolated_bmap); //We need local_nisolated and local_isolated_bmap to be ready for next steps __syncthreads(); IndexType total_nisolated = BlockReduce(block_reduce_temp_storage).Sum(local_nisolated); if (threadIdx.x == 0 && total_nisolated) { atomicAdd(nisolated, total_nisolated); } int logicalwarpid = threadIdx.x / FLAG_ISOLATED_VERTICES_THREADS_PER_INT; //Building int for bmap int int_aggregate_isolated_bmap = WarpReduce(warp_reduce_temp_storage[logicalwarpid]).Reduce( local_isolated_bmap, BitwiseOr()); int is_head_of_visited_int = ((threadIdx.x % (FLAG_ISOLATED_VERTICES_THREADS_PER_INT)) == 0); if (is_head_of_visited_int) { isolated_bmap[thread_off / INT_SIZE] = int_aggregate_isolated_bmap; } BlockStore(store_temp_storage).Store(degrees + block_off, local_degree, block_valid_items); } } template<typename IndexType> void flag_isolated_vertices( IndexType n, int *isolated_bmap, const IndexType *row_ptr, IndexType *degrees, IndexType *nisolated, cudaStream_t m_stream) { dim3 grid, block; block.x = FLAG_ISOLATED_VERTICES_DIMX; grid.x = min( (IndexType) MAXBLOCKS, (n / FLAG_ISOLATED_VERTICES_VERTICES_PER_THREAD + 1 + block.x - 1) / block.x); flag_isolated_vertices_kernel<<<grid, block, 0, m_stream>>>(n, isolated_bmap, row_ptr, degrees, nisolated); cudaCheckError() ; } // // // // Some utils functions // // //Creates CUB data for graph size n template<typename IndexType> void cub_exclusive_sum_alloc(IndexType n, void*& d_temp_storage, size_t &temp_storage_bytes) { // Determine temporary device storage requirements for exclusive prefix scan d_temp_storage = NULL; temp_storage_bytes = 0; IndexType *d_in = NULL, *d_out = NULL; cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, n); // Allocate temporary storage for exclusive prefix scan cudaMalloc(&d_temp_storage, temp_storage_bytes); } template<typename IndexType> __global__ void fill_kernel(IndexType *vec, IndexType n, IndexType val) { for (IndexType u = blockDim.x * blockIdx.x + threadIdx.x; u < n; u += gridDim.x * blockDim.x) vec[u] = val; } template<typename IndexType> void fill(IndexType *vec, IndexType n, IndexType val, cudaStream_t m_stream) { dim3 grid, block; block.x = 256; grid.x = min((n + block.x - 1) / block.x, (IndexType) MAXBLOCKS); fill_kernel<<<grid, block, 0, m_stream>>>(vec, n, val); cudaCheckError() ; } template<typename IndexType> __global__ void set_frontier_degree_kernel( IndexType *frontier_degree, IndexType *frontier, const IndexType *degree, IndexType n) { for (IndexType idx = blockDim.x * blockIdx.x + threadIdx.x; idx < n; idx += gridDim.x * blockDim.x) { IndexType u = frontier[idx]; frontier_degree[idx] = degree[u]; } } template<typename IndexType> void set_frontier_degree( IndexType *frontier_degree, IndexType *frontier, const IndexType *degree, IndexType n, cudaStream_t m_stream) { dim3 grid, block; block.x = 256; grid.x = min((n + block.x - 1) / block.x, (IndexType) MAXBLOCKS); set_frontier_degree_kernel<<<grid, 
block, 0, m_stream>>>(frontier_degree, frontier, degree, n); cudaCheckError() ; } template<typename IndexType> void exclusive_sum( void *d_temp_storage, size_t temp_storage_bytes, IndexType *d_in, IndexType *d_out, IndexType num_items, cudaStream_t m_stream) { if (num_items <= 1) return; //DeviceScan fails if n==1 cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, m_stream); } template<typename T> __global__ void fill_vec_kernel(T *vec, T n, T val) { for (T idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) vec[idx] = val; } template<typename T> void fill_vec(T *vec, T n, T val, cudaStream_t stream) { dim3 grid, block; block.x = 256; grid.x = (n + block.x - 1) / block.x; fill_vec_kernel<<<grid, block, 0, stream>>>(vec, n, val); cudaCheckError() ; } } //
0
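The top-down expansion in bfs_kernels.cu above assigns one thread per frontier edge and recovers each edge's source vertex with binsearch_maxle, a "largest index whose prefix-sum value is <= the edge id" search over the exclusive sum of frontier degrees; the precomputed bucket offsets only narrow the search window. The host-side C++ sketch below is illustrative and not part of nvgraph: it reproduces that edge-to-source mapping on a tiny frontier, and every name in it is made up for the example.

// Hedged host-side sketch (not nvgraph code): maps a global edge index back to
// its source frontier slot using a "max less-or-equal" binary search over the
// exclusive prefix sum of frontier degrees, mirroring binsearch_maxle() above.
#include <cstdio>
#include <vector>

// Returns the largest index k in [low, high] such that vec[k] <= val,
// the same semantics as the device binsearch_maxle in bfs_kernels.cu.
static int binsearch_maxle_host(const std::vector<int>& vec, int val, int low, int high) {
    while (low < high) {
        int mid = low + (high - low + 1) / 2;  // bias up so the loop terminates
        if (vec[mid] <= val) low = mid;
        else high = mid - 1;
    }
    return low;
}

int main() {
    // Frontier of 4 vertices with degrees 3, 1, 4, 2.
    std::vector<int> degrees = {3, 1, 4, 2};
    std::vector<int> excl_sum(degrees.size());
    int total_degree = 0;
    for (size_t i = 0; i < degrees.size(); ++i) {
        excl_sum[i] = total_degree;      // exclusive prefix sum of degrees
        total_degree += degrees[i];
    }

    // Each "thread" owns one global edge id; the search recovers the source slot
    // and the local edge offset within that vertex's adjacency list.
    for (int edge_id = 0; edge_id < total_degree; ++edge_id) {
        int k = binsearch_maxle_host(excl_sum, edge_id, 0, (int)excl_sum.size() - 1);
        int local_edge = edge_id - excl_sum[k];
        printf("edge %d -> frontier slot %d, local edge %d\n", edge_id, k, local_edge);
    }
    return 0;
}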
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/triangles_counting.cpp
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <triangles_counting.hxx> #include <triangles_counting_kernels.hxx> #include <thrust/sequence.h> namespace nvgraph { namespace triangles_counting { template <typename IndexType> TrianglesCount<IndexType>::TrianglesCount(const CsrGraph <IndexType>& graph, cudaStream_t stream, int device_id) { m_stream = stream; m_done = true; if (device_id == -1) cudaGetDevice(&m_dev_id); else m_dev_id = device_id; cudaGetDeviceProperties(&m_dev_props, m_dev_id); cudaCheckError(); cudaSetDevice(m_dev_id); cudaCheckError(); // fill spmat struct; m_mat.nnz = graph.get_num_edges(); m_mat.N = graph.get_num_vertices(); m_mat.roff_d = graph.get_raw_row_offsets(); m_mat.cols_d = graph.get_raw_column_indices(); m_seq.allocate(m_mat.N, stream); create_nondangling_vector(m_mat.roff_d, m_seq.raw(), &(m_mat.nrows), m_mat.N, m_stream); m_mat.rows_d = m_seq.raw(); } template <typename IndexType> TrianglesCount<IndexType>::~TrianglesCount() { cudaSetDevice(m_dev_id); } template <typename IndexType> void TrianglesCount<IndexType>::tcount_bsh() { // printf("TrianglesCount: %s\n", __func__); fflush(stdout); if (m_dev_props.sharedMemPerBlock*8 < (size_t)m_mat.nrows) { FatalError("Number of vertices to high to use this kernel!", NVGRAPH_ERR_BAD_PARAMETERS); } unsigned int *bmap_d; size_t bmld = DIV_UP(m_mat.N,8*sizeof(*bmap_d)); bmld = 8ull*DIV_UP(bmld*sizeof(*bmap_d), 8); bmld /= sizeof(*bmap_d); //size_t bmap_sz = sizeof(*bmap_d)*bmld; int nblock = m_mat.nrows; Vector<uint64_t> ocnt_d(nblock); cudaMemset(ocnt_d.raw(), 0, ocnt_d.bytes()); cudaCheckError(); tricnt_bsh(nblock, &m_mat, ocnt_d.raw(), bmld, m_stream); m_triangles_number = reduce(ocnt_d.raw(), nblock, m_stream); } template <typename IndexType> void TrianglesCount<IndexType>::tcount_b2b() { // printf("TrianglesCount: %s\n", __func__); fflush(stdout); // allocate a big enough array for output Vector<uint64_t> ocnt_d(m_mat.nrows); cudaMemset(ocnt_d.raw(), 0, ocnt_d.bytes()); cudaCheckError(); // allocate level 1 bitmap Vector<unsigned int> bmapL1_d; size_t bmldL1 = DIV_UP(m_mat.N,8*sizeof(*bmapL1_d.raw())); // make the size a multiple of 8 bytes, for zeroing in kernel... bmldL1 = 8ull*DIV_UP(bmldL1*sizeof(*bmapL1_d.raw()), 8); bmldL1 /= sizeof(*bmapL1_d.raw()); size_t free_bytes, total_bytes; cudaMemGetInfo(&free_bytes, &total_bytes); cudaCheckError(); int nblock = (free_bytes*95/100) / (sizeof(*bmapL1_d.raw())*bmldL1);//@TODO: what? 
nblock = MIN(nblock, m_mat.nrows); size_t bmapL1_sz = sizeof(*bmapL1_d.raw())*bmldL1*nblock; bmapL1_d.allocate(bmldL1*nblock); //cuda 8.0 : memory past 16th GB may not be set with cudaMemset(), //CHECK_CUDA(cudaMemset(bmapL1_d, 0, bmapL1_sz)); myCudaMemset((unsigned long long *)bmapL1_d.raw(), 0ull, bmapL1_sz/8, m_stream); // allocate level 0 bitmap Vector<unsigned int> bmapL0_d; size_t bmldL0 = DIV_UP(DIV_UP(m_mat.N, BLK_BWL0), 8*sizeof(*bmapL0_d.raw())); bmldL0 = 8ull*DIV_UP(bmldL0*sizeof(*bmapL0_d.raw()), 8); bmldL0 /= sizeof(*bmapL0_d.raw()); size_t bmapL0_sz = sizeof(*bmapL0_d.raw())*nblock*bmldL0; bmapL0_d.allocate(nblock*bmldL0); myCudaMemset((unsigned long long *)bmapL0_d.raw(), 0ull, bmapL0_sz/8, m_stream); tricnt_b2b(nblock, &m_mat, ocnt_d.raw(), bmapL0_d.raw(), bmldL0, bmapL1_d.raw(), bmldL1, m_stream); m_triangles_number = reduce(ocnt_d.raw(), nblock, m_stream); } template <typename IndexType> void TrianglesCount<IndexType>::tcount_wrp() { // printf("TrianglesCount: %s\n", __func__); fflush(stdout); // allocate a big enough array for output Vector<uint64_t> ocnt_d; size_t ocnt_sz = DIV_UP(m_mat.nrows, (THREADS/32)); ocnt_d.allocate(ocnt_sz); cudaMemset(ocnt_d.raw(), 0, ocnt_d.bytes()); cudaCheckError(); Vector<unsigned int> bmap_d; size_t bmld = DIV_UP(m_mat.N,8*sizeof(*bmap_d.raw())); // make the size a multiple of 8 bytes, for zeroing in kernel... bmld = 8ull*DIV_UP(bmld*sizeof(*bmap_d.raw()), 8); bmld /= sizeof(*bmap_d.raw()); // number of blocks limited by birmap size size_t free_bytes, total_bytes; cudaMemGetInfo(&free_bytes, &total_bytes); cudaCheckError(); int nblock = (free_bytes*95/100) / (sizeof(*bmap_d.raw())*bmld*(THREADS/32)); nblock = MIN(nblock, DIV_UP(m_mat.nrows, (THREADS/32))); //int maxblocks = props.multiProcessorCount * props.maxThreadsPerMultiProcessor / THREADS; //nblock = MIN(nblock, maxblocks); size_t bmap_sz = bmld*nblock*(THREADS/32); bmap_d.allocate(bmap_sz); //CUDA 8.0 memory past 16th GB may not be set with cudaMemset() //CHECK_CUDA(cudaMemset(bmap_d, 0, bmap_sz)); myCudaMemset((unsigned long long *)bmap_d.raw(), 0ull, bmap_sz*sizeof(*bmap_d.raw())/8, m_stream); tricnt_wrp(nblock, &m_mat, ocnt_d.raw(), bmap_d.raw(), bmld, m_stream); m_triangles_number = reduce(ocnt_d.raw(), nblock, m_stream); } template <typename IndexType> void TrianglesCount<IndexType>::tcount_thr() { // printf("TrianglesCount: %s\n", __func__); fflush(stdout); int maxblocks = m_dev_props.multiProcessorCount * m_dev_props.maxThreadsPerMultiProcessor / THREADS; int nblock = MIN(maxblocks, DIV_UP(m_mat.nrows,THREADS)); Vector<uint64_t> ocnt_d(nblock); cudaMemset(ocnt_d.raw(), 0, ocnt_d.bytes()); cudaCheckError(); tricnt_thr(nblock, &m_mat, ocnt_d.raw(), m_stream); m_triangles_number = reduce(ocnt_d.raw(), nblock, m_stream); } template <typename IndexType> NVGRAPH_ERROR TrianglesCount<IndexType>::count(TrianglesCountAlgo algo) { // std::cout << "Starting TrianglesCount::count, Algo=" << algo << "\n"; switch(algo) { case TCOUNT_BSH: tcount_bsh(); break; case TCOUNT_B2B: tcount_b2b(); break; case TCOUNT_WRP: tcount_wrp(); break; case TCOUNT_THR: tcount_thr(); break; case TCOUNT_DEFAULT: { double mean_deg = (double)m_mat.nnz / m_mat.nrows; if (mean_deg < DEG_THR1) tcount_thr(); else if (mean_deg < DEG_THR2) tcount_wrp(); else { const int shMinBlkXSM = 6; if (m_dev_props.sharedMemPerBlock*8/shMinBlkXSM < (size_t)m_mat.N) tcount_b2b(); else tcount_bsh(); } } break; default: FatalError("Bad algorithm specified for triangles counting", NVGRAPH_ERR_BAD_PARAMETERS); } m_event.record(); 
return NVGRAPH_OK; } template class TrianglesCount<int>; } // end namespace triangles_counting } // end namespace nvgraph
0
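TrianglesCount::count() with TCOUNT_DEFAULT selects a kernel from the mean degree nnz/nrows and from whether a per-row bitmap fits in shared memory at the desired occupancy. The sketch below is not nvgraph code; it restates that dispatch as a standalone function. DEG_THR1 and DEG_THR2 are defined elsewhere in the library and are not visible in this file, so the numeric thresholds used here are placeholders only.

// Hedged sketch (not nvgraph code): the TCOUNT_DEFAULT kernel-selection logic
// from TrianglesCount::count(), with assumed stand-in threshold values.
#include <cstddef>
#include <cstdio>

enum class Kernel { PerThread, PerWarp, BlockBitmap2Level, BlockSharedBitmap };

static Kernel pick_triangle_kernel(long long nnz, long long nrows, long long nvertices,
                                   size_t shared_mem_per_block) {
    const double kDegThr1 = 3.5;   // assumed stand-in for DEG_THR1
    const double kDegThr2 = 38.0;  // assumed stand-in for DEG_THR2
    const int kShMinBlkXSM = 6;    // mirrors the local constant used in the selection

    double mean_deg = (double)nnz / (double)nrows;
    if (mean_deg < kDegThr1) return Kernel::PerThread;   // tcount_thr
    if (mean_deg < kDegThr2) return Kernel::PerWarp;     // tcount_wrp
    // Dense rows: pick between the block-level bitmap kernels depending on whether
    // one row bitmap fits in shared memory at the desired blocks-per-SM occupancy.
    if (shared_mem_per_block * 8 / kShMinBlkXSM < (size_t)nvertices)
        return Kernel::BlockBitmap2Level;                // tcount_b2b
    return Kernel::BlockSharedBitmap;                    // tcount_bsh
}

int main() {
    Kernel k = pick_triangle_kernel(/*nnz=*/5000000, /*nrows=*/100000,
                                    /*nvertices=*/100000, /*shmem=*/49152);
    printf("selected kernel id: %d\n", (int)k);
    return 0;
}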
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/sssp.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define NEW_CSRMV #include <algorithm> #include <iomanip> #include "valued_csr_graph.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_cusparse.hxx" #include "nvgraph_cublas.hxx" #include "nvgraph_error.hxx" #include "nvgraph_csrmv.hxx" #include "sssp.hxx" #ifdef NEW_CSRMV #include "csrmv_cub.h" #include "cub_semiring/cub.cuh" #endif #include <cfloat> #include "debug_macros.h" #ifdef DEBUG #define SP_VERBOSE 0 #endif namespace nvgraph { template <typename IndexType_, typename ValueType_> void Sssp<IndexType_, ValueType_>::setup(IndexType source_index, Vector<ValueType>& source_connection, Vector<ValueType>& sssp_result) { #ifdef DEBUG int n = static_cast<int>(m_network.get_num_vertices()); if (n != static_cast<int>(source_connection.get_size()) || n != static_cast<int>(sssp_result.get_size()) || !( source_index>=0 && source_index<n) ) { CERR() << "n : " << n << std::endl; CERR() << "source_index : " << source_index << std::endl; CERR() << "source_connection : " << source_connection.get_size() << std::endl; CERR() << "sssp_result : " << sssp_result.get_size() << std::endl; FatalError("Wrong input vector in SSSP solver.", NVGRAPH_ERR_BAD_PARAMETERS); } #endif m_source = source_index; m_tmp = source_connection; m_sssp = sssp_result; //m_mask.allocate(n, m_stream); //m_mask.fill(1, m_stream); m_is_setup = true; } template <typename IndexType_, typename ValueType_> bool Sssp<IndexType_, ValueType_>::solve_it() { int n = static_cast<int>(m_network.get_num_vertices()), nnz = static_cast<int>(m_network.get_num_edges()); int inc = 1; ValueType_ tolerance = static_cast<float>( 1.0E-6); ValueType *sssp = m_sssp.raw(), *tmp = m_tmp.raw(); //initially set y equal to x // int *mask = m_mask.raw(); #ifdef NEW_CSRMV ValueType_ alpha = cub_semiring::cub::MinPlusSemiring<ValueType_>::times_ident(); ValueType_ beta = cub_semiring::cub::MinPlusSemiring<ValueType_>::times_ident(); SemiringDispatch<IndexType_, ValueType_>::template Dispatch< cub_semiring::cub::MinPlusSemiring<ValueType_> >( m_network.get_raw_values(), m_network.get_raw_row_offsets(), m_network.get_raw_column_indices(), tmp, sssp, alpha, beta, n, n, nnz, m_stream); #else ValueType_ alpha = 0.0, beta = 0.0; //times_ident = 0 for MinPlus semiring #if __cplusplus > 199711L Semiring SR = Semiring::MinPlus; #else Semiring SR = MinPlus; #endif // y = Network^T op x op->plus x // *op* is (plus : min, time : +) /*************************** ---> insert csrmv_mp here - semiring: (min, +) - mask: m_mask - parameters: (n, n, nnz, alpha, m_network, tmp, beta, sssp); ****************************/ csrmv_mp<IndexType_, ValueType_>(n, n, nnz, alpha, m_network, tmp, beta, sssp, SR, m_stream); #endif // CVG check : ||tmp - sssp|| Cublas::axpy(n, (ValueType_)-1.0, sssp, inc, tmp, inc); m_residual = Cublas::nrm2(n, tmp, inc); if (m_residual < tolerance) { return true; } else { // we do the convergence check by computing the norm two of tmp = sssp(n-1) - sssp(n) // hence if tmp[i] = 0, 
sssp[i] hasn't changed so we can skip the i th column at the n+1 iteration //m_tmp.flag_zeros(m_mask, m_stream); m_tmp.copy(m_sssp, m_stream); return false; } } template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR Sssp<IndexType_, ValueType_>::solve(IndexType source_index, Vector<ValueType>& source_connection, Vector<ValueType>& sssp_result) { setup(source_index, source_connection, sssp_result); bool converged = false; int max_it = static_cast<int>(m_network.get_num_edges()), i = 0; #ifdef SP_VERBOSE //int n = static_cast<int>(m_network.get_num_vertices()), nnz = static_cast<int>(m_network.get_num_edges()); //dump_raw_vec(m_network.get_raw_row_offsets(), n, 0); //dump_raw_vec(m_network.get_raw_column_indices(),n, 0); //dump_raw_vec(m_network.get_raw_values(), nnz, 0); std::stringstream ss; ss.str(std::string()); size_t used_mem, free_mem, total_mem; ss <<" --------------------Sssp--------------------"<< std::endl; ss <<" --------------------------------------------"<< std::endl; ss << std::setw(10) << "Iteration" << std::setw(20) << " Mem Usage (MB)" << std::setw(15) << "Residual" << std::endl; ss <<" --------------------------------------------"<< std::endl; COUT()<<ss.str(); #endif while (!converged && i < max_it) { converged = solve_it(); i++; #ifdef SP_VERBOSE ss.str(std::string()); cnmemMemGetInfo(&free_mem, &total_mem, NULL); used_mem=total_mem-free_mem; ss << std::setw(10) << i ; ss.precision(3); ss << std::setw(20) << std::fixed << used_mem/1024.0/1024.0; ss << std::setw(15) << std::scientific << m_residual << std::endl; COUT()<<ss.str(); #endif } m_iterations = i; #ifdef SP_VERBOSE COUT() <<" --------------------------------------------"<< std::endl; #endif return converged ? NVGRAPH_OK : NVGRAPH_ERR_NOT_CONVERGED; } template class Sssp<int, double>; template class Sssp<int, float>; } // end namespace nvgraph
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/convert.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "nvgraph_convert.hxx" #include "nvgraph_error.hxx" namespace nvgraph{ void csr2coo( const int *csrSortedRowPtr, int nnz, int m, int *cooRowInd, cusparseIndexBase_t idxBase){ CHECK_CUSPARSE( cusparseXcsr2coo( Cusparse::get_handle(), csrSortedRowPtr, nnz, m, cooRowInd, idxBase )); } void coo2csr( const int *cooRowInd, int nnz, int m, int *csrSortedRowPtr, cusparseIndexBase_t idxBase){ CHECK_CUSPARSE( cusparseXcoo2csr( Cusparse::get_handle(), cooRowInd, nnz, m, csrSortedRowPtr, idxBase )); } void csr2csc( int m, int n, int nnz, const void *csrVal, const int *csrRowPtr, const int *csrColInd, void *cscVal, int *cscRowInd, int *cscColPtr, cusparseAction_t copyValues, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ CHECK_CUSPARSE( cusparseCsr2cscEx( Cusparse::get_handle(), m, n, nnz, csrVal, *dataType, csrRowPtr, csrColInd, cscVal, *dataType, cscRowInd, cscColPtr, copyValues, idxBase, *dataType )); } void csc2csr( int m, int n, int nnz, const void *cscVal, const int *cscRowInd, const int *cscColPtr, void *csrVal, int *csrRowPtr, int *csrColInd, cusparseAction_t copyValues, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ CHECK_CUSPARSE( cusparseCsr2cscEx( Cusparse::get_handle(), m, n, nnz, cscVal, *dataType, cscColPtr, cscRowInd, csrVal, *dataType, csrColInd, csrRowPtr, copyValues, idxBase, *dataType )); } void cooSortByDestination(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ size_t pBufferSizeInBytes = 0; SHARED_PREFIX::shared_ptr<char> pBuffer; SHARED_PREFIX::shared_ptr<int> P; // permutation array // step 0: copy src to dst if(dstRowInd!=srcRowInd) CHECK_CUDA( cudaMemcpy(dstRowInd, srcRowInd, nnz*sizeof(int), cudaMemcpyDefault) ); if(dstColInd!=srcColInd) CHECK_CUDA( cudaMemcpy(dstColInd, srcColInd, nnz*sizeof(int), cudaMemcpyDefault) ); // step 1: allocate buffer (needed for cooSortByRow) cooSortBufferSize(m, n, nnz, dstRowInd, dstColInd, &pBufferSizeInBytes); pBuffer = allocateDevice<char>(pBufferSizeInBytes, NULL); // step 2: setup permutation vector P to identity P = allocateDevice<int>(nnz, NULL); createIdentityPermutation(nnz, P.get()); // step 3: sort COO format by Row cooGetDestinationPermutation(m, n, nnz, dstRowInd, dstColInd, P.get(), pBuffer.get()); // step 4: gather sorted cooVals gthrX(nnz, srcVal, dstVal, P.get(), idxBase, dataType); } void cooSortBySource(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ size_t pBufferSizeInBytes = 0; SHARED_PREFIX::shared_ptr<char> pBuffer; SHARED_PREFIX::shared_ptr<int> P; // permutation array // step 0: copy src to dst CHECK_CUDA( cudaMemcpy(dstRowInd, srcRowInd, nnz*sizeof(int), cudaMemcpyDefault) ); CHECK_CUDA( cudaMemcpy(dstColInd, srcColInd, nnz*sizeof(int), 
cudaMemcpyDefault) ); // step 1: allocate buffer (needed for cooSortByRow) cooSortBufferSize(m, n, nnz, dstRowInd, dstColInd, &pBufferSizeInBytes); pBuffer = allocateDevice<char>(pBufferSizeInBytes, NULL); // step 2: setup permutation vector P to identity P = allocateDevice<int>(nnz, NULL); createIdentityPermutation(nnz, P.get()); // step 3: sort COO format by Row cooGetSourcePermutation(m, n, nnz, dstRowInd, dstColInd, P.get(), pBuffer.get()); // step 4: gather sorted cooVals gthrX(nnz, srcVal, dstVal, P.get(), idxBase, dataType); } void coos2csc(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColPtr, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ // coos -> cood -> csc SHARED_PREFIX::shared_ptr<int> tmp = allocateDevice<int>(nnz, NULL); cooSortByDestination(m, n, nnz, srcVal, srcRowInd, srcColInd, dstVal, dstRowInd, tmp.get(), idxBase, dataType); coo2csr(tmp.get(), nnz, m, dstColPtr, idxBase); } void cood2csr(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowPtr, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ // cood -> coos -> csr SHARED_PREFIX::shared_ptr<int> tmp = allocateDevice<int>(nnz, NULL); cooSortBySource(m, n, nnz, srcVal, srcRowInd, srcColInd, dstVal, tmp.get(), dstColInd, idxBase, dataType); coo2csr(tmp.get(), nnz, m, dstRowPtr, idxBase); } void coou2csr(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowPtr, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ cood2csr(m, n, nnz, srcVal, srcRowInd, srcColInd, dstVal, dstRowPtr, dstColInd, idxBase, dataType); } void coou2csc(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColPtr, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ coos2csc(m, n, nnz, srcVal, srcRowInd, srcColInd, dstVal, dstRowInd, dstColPtr, idxBase, dataType); } ////////////////////////// Utility functions ////////////////////////// void createIdentityPermutation(int n, int *p){ CHECK_CUSPARSE( cusparseCreateIdentityPermutation(Cusparse::get_handle(), n, p) ); } void gthrX( int nnz, const void *y, void *xVal, const int *xInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType){ if(*dataType==CUDA_R_32F){ CHECK_CUSPARSE( cusparseSgthr(Cusparse::get_handle(), nnz, (float*)y, (float*)xVal, xInd, idxBase )); } else if(*dataType==CUDA_R_64F) { CHECK_CUSPARSE( cusparseDgthr(Cusparse::get_handle(), nnz, (double*)y, (double*)xVal, xInd, idxBase )); } } void cooSortBufferSize(int m, int n, int nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes) { CHECK_CUSPARSE( cusparseXcoosort_bufferSizeExt( Cusparse::get_handle(), m, n, nnz, cooRows, cooCols, pBufferSizeInBytes )); } void cooGetSourcePermutation(int m, int n, int nnz, int *cooRows, int *cooCols, int *p, void *pBuffer) { CHECK_CUSPARSE( cusparseXcoosortByRow( Cusparse::get_handle(), m, n, nnz, cooRows, cooCols, p, pBuffer )); } void cooGetDestinationPermutation(int m, int n, int nnz, int *cooRows, int *cooCols, int *p, void *pBuffer) { CHECK_CUSPARSE( cusparseXcoosortByColumn( Cusparse::get_handle(), m, n, nnz, cooRows, cooCols, p, pBuffer )); } } //end namespace nvgraph
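The coo2csr wrapper above defers to cusparseXcoo2csr; the following tiny host reference (an illustration, not the library routine) shows the row-pointer compression it performs on sorted, zero-based COO row indices.

#include <vector>

// Build CSR row offsets from sorted COO row indices (base-0 assumed).
std::vector<int> coo_to_csr_offsets(const std::vector<int>& coo_rows, int m)
{
    std::vector<int> row_ptr(m + 1, 0);
    for (int r : coo_rows) ++row_ptr[r + 1];                   // count nnz per row
    for (int i = 0; i < m; ++i) row_ptr[i + 1] += row_ptr[i];  // prefix sum -> offsets
    return row_ptr;
}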
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/widest_path.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define NEW_CSRMV #include <algorithm> #include <iomanip> #include <cfloat> #include "nvgraph_error.hxx" #include "valued_csr_graph.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_cublas.hxx" #ifdef NEW_CSRMV #include "csrmv_cub.h" #include "cub_semiring/cub.cuh" #endif #include "nvgraph_csrmv.hxx" #include "widest_path.hxx" #include "debug_macros.h" #ifdef DEBUG #define MF_VERBOSE 0 #endif namespace nvgraph { template <typename IndexType_, typename ValueType_> void WidestPath<IndexType_, ValueType_>::setup(IndexType source_index, Vector<ValueType>& source_connection, Vector<ValueType>& widest_path_result) { #ifdef DEBUG int n = static_cast<int>(m_network.get_num_vertices()); if (n != static_cast<int>(source_connection.get_size()) || n != static_cast<int>(widest_path_result.get_size()) || !( source_index>=0 && source_index<n) ) { CERR() << "n : " << n << std::endl; CERR() << "source_index : " << source_index << std::endl; CERR() << "source_connection : " << source_connection.get_size() << std::endl; CERR() << "widest_path_result : " << widest_path_result.get_size() << std::endl; FatalError("Wrong input vector in WidestPath solver.", NVGRAPH_ERR_BAD_PARAMETERS); } #endif m_source = source_index; m_tmp = source_connection; m_widest_path = widest_path_result; //m_mask.allocate(n); m_is_setup = true; } template <typename IndexType_, typename ValueType_> bool WidestPath<IndexType_, ValueType_>::solve_it() { int n = static_cast<int>(m_network.get_num_vertices()), nnz = static_cast<int>(m_network.get_num_edges()); int inc = 1; ValueType_ tolerance = static_cast<float>( 1.0E-6); ValueType *widest_path = m_widest_path.raw(), *tmp = m_tmp.raw(); // int *mask = m_mask.raw(); // y = Network^T op x op->plus x // *op* is (plus : max, time : min) /*************************** ---> insert csrmv_mp here - semiring: (max, min) - mask: m_mask // not implemented in csrmv - parameters: (n, n, nnz, alpha, m_network, tmp, beta, widest_path); ****************************/ // About setting alpha & beta // 1. The general Csrmv_mp_sr does : // y = alpha op->time A op->time x op->plus beta op->time y // 2. SR = MaxMin has : // plus_ident = SR_type(-inf); // times_ident = SR_type(inf); // times_null = SR_type(-inf); // 3. 
In order to solve : // y = Network^T op x op->plus x // We need alpha = times_ident // beta = times_ident #ifdef NEW_CSRMV ValueType_ alpha = cub_semiring::cub::MaxMinSemiring<ValueType_>::times_ident(); ValueType_ beta = cub_semiring::cub::MaxMinSemiring<ValueType_>::times_ident(); SemiringDispatch<IndexType_, ValueType_>::template Dispatch< cub_semiring::cub::MaxMinSemiring<ValueType_> >( m_network.get_raw_values(), m_network.get_raw_row_offsets(), m_network.get_raw_column_indices(), tmp, widest_path, alpha, beta, n, n, nnz, m_stream); #else ValueType_ inf; if (typeid(ValueType_) == typeid(float)) inf = FLT_MAX ; else if (typeid(ValueType_) == typeid(double)) inf = DBL_MAX ; else FatalError("Graph value type is not supported by this semiring.", NVGRAPH_ERR_BAD_PARAMETERS); ValueType_ alpha = inf, beta = inf; #if __cplusplus > 199711L Semiring SR = Semiring::MaxMin; #else // new csrmv Semiring SR = MaxMin; #endif csrmv_mp<IndexType_, ValueType_>(n, n, nnz, alpha, m_network, tmp, beta, widest_path, SR, m_stream); #endif // new csrmv // CVG check : ||tmp - widest_path|| Cublas::axpy(n, (ValueType_)-1.0, widest_path, inc, tmp, inc); m_residual = Cublas::nrm2(n, tmp, inc); if (m_residual < tolerance) { return true; } else { // we do the convergence check by computing the norm two of tmp = widest_path(n-1) - widest_path(n) // hence if tmp[i] = 0, widest_path[i] hasn't changed so we can skip the i th column at the n+1 iteration // m_tmp.flag_zeros(m_mask); m_tmp.copy(m_widest_path); // we want x+1 = Ax +x and csrmv does y = Ax+y, so we copy x in y here. return false; } } template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR WidestPath<IndexType_, ValueType_>::solve(IndexType source_index, Vector<ValueType>& source_connection, Vector<ValueType>& widest_path_result) { setup(source_index, source_connection, widest_path_result); bool converged = false; int max_it = 100000, i = 0; #ifdef MF_VERBOSE std::stringstream ss; ss.str(std::string()); size_t used_mem, free_mem, total_mem; ss <<" ------------------WidestPath------------------"<< std::endl; ss <<" --------------------------------------------"<< std::endl; ss << std::setw(10) << "Iteration" << std::setw(20) << " Mem Usage (MB)" << std::setw(15) << "Residual" << std::endl; ss <<" --------------------------------------------"<< std::endl; COUT()<<ss.str(); #endif while (!converged && i < max_it) { converged = solve_it(); i++; #ifdef MF_VERBOSE ss.str(std::string()); cnmemMemGetInfo(&free_mem, &total_mem, NULL); used_mem=total_mem-free_mem; ss << std::setw(10) << i ; ss.precision(3); ss << std::setw(20) << std::fixed << used_mem/1024.0/1024.0; ss << std::setw(15) << std::scientific << m_residual << std::endl; COUT()<<ss.str(); #endif } m_iterations = i; #ifdef MF_VERBOSE COUT() <<" --------------------------------------------"<< std::endl; #endif return converged ? NVGRAPH_OK : NVGRAPH_ERR_NOT_CONVERGED; } template class WidestPath<int, double>; template class WidestPath<int, float>; } // end namespace nvgraph
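For comparison with the SSSP case, here is a host-side sketch (illustration only, not the device path) of one (max, min) relaxation as used by WidestPath: the best bottleneck width to vertex i is the maximum over its edges of min(edge capacity, width at the neighbor), kept monotone against the previous value. The CSR arrays are assumed inputs.

#include <vector>
#include <algorithm>

std::vector<float> widest_path_relax_once(const std::vector<int>&   row_offsets,
                                          const std::vector<int>&   col_indices,
                                          const std::vector<float>& capacities,
                                          const std::vector<float>& width)
{
    const int n = static_cast<int>(row_offsets.size()) - 1;
    std::vector<float> width_new(width);  // beta = times_ident keeps the old widths
    for (int i = 0; i < n; ++i)
        for (int e = row_offsets[i]; e < row_offsets[i + 1]; ++e)
            width_new[i] = std::max(width_new[i],
                                    std::min(capacities[e], width[col_indices[e]]));
    return width_new;
}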
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/modularity_maximization.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#ifdef NVGRAPH_PARTITION #include "modularity_maximization.hxx" #include <stdio.h> #include <math.h> #include <cuda.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/reduce.h> #include <thrust/transform.h> #include "nvgraph_error.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_cublas.hxx" #include "matrix.hxx" #include "lanczos.hxx" #include "kmeans.hxx" #include "debug_macros.h" #include "lobpcg.hxx" #include "sm_utils.h" //#define COLLECT_TIME_STATISTICS 1 //#undef COLLECT_TIME_STATISTICS #ifdef COLLECT_TIME_STATISTICS #include <stddef.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/sysinfo.h> #include "cuda_profiler_api.h" #endif #ifdef COLLECT_TIME_STATISTICS static double timer (void) { struct timeval tv; cudaDeviceSynchronize(); gettimeofday(&tv, NULL); return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; } #endif namespace nvgraph { // ========================================================= // Useful macros // ========================================================= // Get index of matrix entry #define IDX(i,j,lda) ((i)+(j)*(lda)) // namespace { // /// Get string associated with NVGRAPH error flag // static // const char* nvgraphGetErrorString(NVGRAPH_ERROR e) { // switch(e) { // case NVGRAPH_OK: return "NVGRAPH_OK"; // case NVGRAPH_ERR_BAD_PARAMETERS: return "NVGRAPH_ERR_BAD_PARAMETERS"; // case NVGRAPH_ERR_UNKNOWN: return "NVGRAPH_ERR_UNKNOWN"; // case NVGRAPH_ERR_CUDA_FAILURE: return "NVGRAPH_ERR_CUDA_FAILURE"; // case NVGRAPH_ERR_THRUST_FAILURE: return "NVGRAPH_ERR_THRUST_FAILURE"; // case NVGRAPH_ERR_IO: return "NVGRAPH_ERR_IO"; // case NVGRAPH_ERR_NOT_IMPLEMENTED: return "NVGRAPH_ERR_NOT_IMPLEMENTED"; // case NVGRAPH_ERR_NO_MEMORY: return "NVGRAPH_ERR_NO_MEMORY"; // default: return "unknown NVGRAPH error"; // } // } // } template <typename IndexType_, typename ValueType_, bool Device_, bool print_transpose> static int print_matrix(IndexType_ m, IndexType_ n, ValueType_ * A, IndexType_ lda, const char *s){ IndexType_ i,j; ValueType_ * h_A; if (m > lda) { WARNING("print_matrix - invalid parameter (m > lda)"); return -1; } if (Device_) { h_A = (ValueType_ *)malloc(lda*n*sizeof(ValueType_)); if (!h_A) { WARNING("print_matrix - malloc failed"); return -1; } cudaMemcpy(h_A, A, lda*n*sizeof(ValueType_), cudaMemcpyDeviceToHost); cudaCheckError() } else { h_A = A; } printf("%s\n",s); if(print_transpose){ for (j=0; j<n; j++) { for (i=0; i<m; i++) { //assumption m<lda printf("%8.5f, ", h_A[i+j*lda]); } printf("\n"); } } else { for (i=0; i<m; i++) { //assumption m<lda for (j=0; j<n; j++) { printf("%8.5f, ", h_A[i+j*lda]); } printf("\n"); } } if (Device_) { if (h_A) free(h_A); } return 0; } template <typename IndexType_, typename ValueType_> static __global__ void scale_obs_kernel(IndexType_ m, IndexType_ n, ValueType_ *obs) { IndexType_ i,j,k,index,mm; ValueType_ alpha,v,last; bool valid; //ASSUMPTION: kernel is launched with either 2, 4, 8, 16 or 
32 threads in x-dimension //compute alpha mm =(((m+blockDim.x-1)/blockDim.x)*blockDim.x); //m in multiple of blockDim.x alpha=0.0; //printf("[%d,%d,%d,%d] n=%d, li=%d, mn=%d \n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y, n, li, mn); for (j=threadIdx.y+blockIdx.y*blockDim.y; j<n; j+=blockDim.y*gridDim.y) { for (i=threadIdx.x; i<mm; i+=blockDim.x) { //check if the thread is valid valid = i<m; //get the value of the last thread last = utils::shfl(alpha, blockDim.x-1, blockDim.x); //if you are valid read the value from memory, otherwise set your value to 0 alpha = (valid) ? obs[i+j*m] : 0.0; alpha = alpha*alpha; //do prefix sum (of size warpSize=blockDim.x =< 32) for (k=1; k<blockDim.x; k*=2) { v = utils::shfl_up(alpha, k, blockDim.x); if (threadIdx.x >= k) alpha+=v; } //shift by last alpha+=last; } } //scale by alpha alpha = utils::shfl(alpha, blockDim.x-1, blockDim.x); alpha = std::sqrt(alpha); for (j=threadIdx.y+blockIdx.y*blockDim.y; j<n; j+=blockDim.y*gridDim.y) { for (i=threadIdx.x; i<m; i+=blockDim.x) { //blockDim.x=32 index = i+j*m; obs[index] = obs[index]/alpha; } } } template <typename IndexType_> IndexType_ next_pow2(IndexType_ n) { IndexType_ v; //Reference: //http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2Float v = n-1; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return v+1; } template <typename IndexType_, typename ValueType_> cudaError_t scale_obs(IndexType_ m, IndexType_ n, ValueType_ *obs) { IndexType_ p2m; dim3 nthreads, nblocks; //find next power of 2 p2m = next_pow2<IndexType_>(m); //setup launch configuration nthreads.x = max(2,min(p2m,32)); nthreads.y = 256/nthreads.x; nthreads.z = 1; nblocks.x = 1; nblocks.y = (n + nthreads.y - 1)/nthreads.y; nblocks.z = 1; //printf("m=%d(%d),n=%d,obs=%p, nthreads=(%d,%d,%d),nblocks=(%d,%d,%d)\n",m,p2m,n,obs,nthreads.x,nthreads.y,nthreads.z,nblocks.x,nblocks.y,nblocks.z); //launch scaling kernel (scale each column of obs by its norm) scale_obs_kernel<IndexType_,ValueType_><<<nblocks,nthreads>>>(m,n,obs); cudaCheckError(); return cudaSuccess; } // ========================================================= // Spectral modularity_maximization // ========================================================= /** Compute partition for a weighted undirected graph. This * partition attempts to minimize the cost function: * Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition) * * @param G Weighted graph in CSR format * @param nClusters Number of partitions. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter_lanczos Maximum number of Lanczos iterations. * @param restartIter_lanczos Maximum size of Lanczos system before * implicit restart. * @param tol_lanczos Convergence tolerance for Lanczos method. * @param maxIter_kmeans Maximum number of k-means iterations. * @param tol_kmeans Convergence tolerance for k-means algorithm. * @param parts (Output, device memory, n entries) Cluster * assignments. * @param iters_lanczos On exit, number of Lanczos iterations * performed. * @param iters_kmeans On exit, number of k-means iterations * performed. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR modularity_maximization( ValuedCsrGraph<IndexType_,ValueType_>& G, IndexType_ nClusters, IndexType_ nEigVecs, IndexType_ maxIter_lanczos, IndexType_ restartIter_lanczos, ValueType_ tol_lanczos, IndexType_ maxIter_kmeans, ValueType_ tol_kmeans, IndexType_ * __restrict__ clusters, Vector<ValueType_> &eigVals, Vector<ValueType_> &eigVecs, IndexType_ & iters_lanczos, IndexType_ & iters_kmeans) { // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(nClusters < 1) { WARNING("invalid parameter (nClusters<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter_lanczos < nEigVecs) { WARNING("invalid parameter (maxIter_lanczos<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter_lanczos < nEigVecs) { WARNING("invalid parameter (restartIter_lanczos<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol_lanczos < 0) { WARNING("invalid parameter (tol_lanczos<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter_kmeans < 0) { WARNING("invalid parameter (maxIter_kmeans<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol_kmeans < 0) { WARNING("invalid parameter (tol_kmeans<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ zero = 0; const ValueType_ one = 1; // Loop index IndexType_ i; // Matrix dimension IndexType_ n = G.get_num_vertices(); // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Matrices Matrix<IndexType_, ValueType_> * A; // Adjacency matrix Matrix<IndexType_, ValueType_> * B; // Modularity matrix // Whether to perform full reorthogonalization in Lanczos bool reorthogonalize_lanczos = false; // k-means residual ValueType_ residual_kmeans; bool scale_eigevec_rows=true; //true; //false; #ifdef COLLECT_TIME_STATISTICS double t1=0.0,t2=0.0; #endif // ------------------------------------------------------- // Spectral partitioner // ------------------------------------------------------- // Compute eigenvectors of Modularity Matrix #ifdef COLLECT_TIME_STATISTICS t1=timer(); #endif // Initialize Modularity Matrix A = new CsrMatrix<IndexType_,ValueType_>(G); B = new ModularityMatrix<IndexType_,ValueType_>(*A, static_cast<IndexType_>(G.get_num_edges())); // Compute smallest eigenvalues and eigenvectors #ifdef COLLECT_TIME_STATISTICS t2=timer(); printf("%f\n",t2-t1); #endif #ifdef COLLECT_TIME_STATISTICS t1=timer(); cudaProfilerStart(); #endif CHECK_NVGRAPH(computeLargestEigenvectors(*B, nEigVecs, maxIter_lanczos, restartIter_lanczos, tol_lanczos, reorthogonalize_lanczos, iters_lanczos, eigVals.raw(), eigVecs.raw())); #ifdef COLLECT_TIME_STATISTICS cudaProfilerStop(); t2=timer(); printf("%f\n",t2-t1); #endif #ifdef COLLECT_TIME_STATISTICS t1=timer(); #endif //eigVals.dump(0, nEigVecs); //eigVecs.dump(0, nEigVecs); //eigVecs.dump(n, nEigVecs); //eigVecs.dump(2*n, nEigVecs); // Whiten eigenvector matrix for(i=0; i<nEigVecs; ++i) { ValueType_ mean, std; mean = thrust::reduce(thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i+1,n))); cudaCheckError(); mean /= n; thrust::transform(thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), 
thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i+1,n)), thrust::make_constant_iterator(mean), thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::minus<ValueType_>()); cudaCheckError(); std = Cublas::nrm2(n, eigVecs.raw()+IDX(0,i,n), 1)/std::sqrt(static_cast<ValueType_>(n)); thrust::transform(thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i+1,n)), thrust::make_constant_iterator(std), thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::divides<ValueType_>()); cudaCheckError(); } delete B; delete A; // Transpose eigenvector matrix // TODO: in-place transpose { Vector<ValueType_> work(nEigVecs*n, stream); Cublas::set_pointer_mode_host(); Cublas::geam(true, false, nEigVecs, n, &one, eigVecs.raw(), n, &zero, (ValueType_*) NULL, nEigVecs, work.raw(), nEigVecs); CHECK_CUDA(cudaMemcpyAsync(eigVecs.raw(), work.raw(), nEigVecs*n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); } if (scale_eigevec_rows) { //WARNING: notice that at this point the matrix has already been transposed, so we are scaling columns scale_obs(nEigVecs,n,eigVecs.raw()); cudaCheckError() //print_matrix<IndexType_,ValueType_,true,false>(nEigVecs-ifirst,n,obs,nEigVecs-ifirst,"Scaled obs"); //print_matrix<IndexType_,ValueType_,true,true>(nEigVecs-ifirst,n,obs,nEigVecs-ifirst,"Scaled obs"); } #ifdef COLLECT_TIME_STATISTICS t2=timer(); printf("%f\n",t2-t1); #endif #ifdef COLLECT_TIME_STATISTICS t1=timer(); #endif //eigVecs.dump(0, nEigVecs*n); // Find partition with k-means clustering CHECK_NVGRAPH(kmeans(n, nEigVecs, nClusters, tol_kmeans, maxIter_kmeans, eigVecs.raw(), clusters, residual_kmeans, iters_kmeans)); #ifdef COLLECT_TIME_STATISTICS t2=timer(); printf("%f\n\n",t2-t1); #endif return NVGRAPH_OK; } //=================================================== // Analysis of graph partition // ========================================================= namespace { /// Functor to generate indicator vectors /** For use in Thrust transform */ template <typename IndexType_, typename ValueType_> struct equal_to_i_op { const IndexType_ i; public: equal_to_i_op(IndexType_ _i) : i(_i) {} template<typename Tuple_> __host__ __device__ void operator()(Tuple_ t) { thrust::get<1>(t) = (thrust::get<0>(t) == i) ? (ValueType_) 1.0 : (ValueType_) 0.0; } }; } /// Compute modularity /** This function determines the modularity based on a graph and cluster assignments * @param G Weighted graph in CSR format * @param nClusters Number of clusters. * @param parts (Input, device memory, n entries) Cluster assignments. 
* @param modularity On exit, modularity */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR analyzeModularity(ValuedCsrGraph<IndexType_,ValueType_> & G, IndexType_ nClusters, const IndexType_ * __restrict__ parts, ValueType_ & modularity) { //using namespace thrust; // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Loop index IndexType_ i; // Matrix dimension IndexType_ n = G.get_num_vertices(); // Values for computing partition cost ValueType_ partModularity, partSize; // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Device memory Vector<ValueType_> part_i(n, stream); Vector<ValueType_> Bx(n, stream); // Adjacency and Modularity matrices Matrix<IndexType_, ValueType_> * A; Matrix<IndexType_, ValueType_> * B; // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Check that parameters are valid if(nClusters < 1) { WARNING("invalid parameter (nClusters<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Initialize cuBLAS Cublas::set_pointer_mode_host(); // Initialize Modularity A = new CsrMatrix<IndexType_,ValueType_>(G); B = new ModularityMatrix<IndexType_,ValueType_>(*A, static_cast<IndexType_>(G.get_num_edges())); // Debug //Vector<ValueType_> ones(n,0); //ones.fill(1.0); //B->mv(1, ones.raw(), 0, Bx.raw()); //Bx.dump(0,n); //Cublas::dot(n, Bx.raw(), 1, ones.raw(), 1, &partModularity); //std::cout<< "sum " <<partModularity<< std::endl; // Initialize output modularity = 0; // Iterate through partitions for(i=0; i<nClusters; ++i) { // Construct indicator vector for ith partition thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(parts), thrust::device_pointer_cast(part_i.raw()))), thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(parts+n), thrust::device_pointer_cast(part_i.raw()+n))), equal_to_i_op<IndexType_,ValueType_>(i)); cudaCheckError(); // Compute size of ith partition Cublas::dot(n, part_i.raw(), 1, part_i.raw(), 1, &partSize); partSize = round(partSize); if(partSize < 0.5) { WARNING("empty partition"); continue; } // Compute modularity B->mv(1, part_i.raw(), 0, Bx.raw()); Cublas::dot(n, Bx.raw(), 1, part_i.raw(), 1, &partModularity); // Record results modularity += partModularity; //std::cout<< "partModularity " <<partModularity<< std::endl; } //modularity = modularity/nClusters; // devide by nnz modularity= modularity/B->getEdgeSum(); // Clean up and return delete B; delete A; return NVGRAPH_OK; } // ========================================================= // Explicit instantiation // ========================================================= template NVGRAPH_ERROR modularity_maximization<int,float>( ValuedCsrGraph<int,float> & G, int nClusters, int nEigVecs, int maxIter_lanczos, int restartIter_lanczos, float tol_lanczos, int maxIter_kmeans, float tol_kmeans, int * __restrict__ parts, Vector<float> &eigVals, Vector<float> &eigVecs, int & iters_lanczos, int & iters_kmeans); template NVGRAPH_ERROR modularity_maximization<int,double>( ValuedCsrGraph<int,double> & G, int nClusters, int nEigVecs, int maxIter_lanczos, int restartIter_lanczos, double tol_lanczos, int maxIter_kmeans, double tol_kmeans, int * __restrict__ parts, Vector<double> &eigVals, Vector<double> &eigVecs, int & iters_lanczos, int & iters_kmeans); template NVGRAPH_ERROR analyzeModularity<int,float>(ValuedCsrGraph<int,float> & G, int 
nClusters, const int * __restrict__ parts, float & modularity); template NVGRAPH_ERROR analyzeModularity<int,double>(ValuedCsrGraph<int,double> & G, int nClusters, const int * __restrict__ parts, double & modularity); } //#endif //NVGRAPH_PARTITION
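analyzeModularity() accumulates s_k^T B s_k per cluster, where s_k is the indicator vector of cluster k and B = A - d d^T / (2m) is the modularity matrix, then normalizes by the edge sum. The dense host sketch below (an illustration under the assumption of a small symmetric weighted adjacency matrix, not the nvgraph code path) computes the same quantity directly.

#include <vector>

double modularity_dense(const std::vector<std::vector<double>>& adj,
                        const std::vector<int>& cluster)
{
    const int n = static_cast<int>(adj.size());
    std::vector<double> deg(n, 0.0);
    double two_m = 0.0;
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j) { deg[i] += adj[i][j]; two_m += adj[i][j]; }

    double q = 0.0;
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
            if (cluster[i] == cluster[j])
                q += adj[i][j] - deg[i] * deg[j] / two_m;  // entry of the modularity matrix B
    return q / two_m;  // standard normalization by total edge weight
}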
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/valued_csr_graph.cpp
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "valued_csr_graph.hxx"
#include "cnmem_shared_ptr.hxx" // interface with CuMem (memory pool lib) for shared ptr

namespace nvgraph
{

// Copy assignment is currently a stub and performs no member-wise copy.
// Returning *this keeps the non-void signature well-formed; the original
// body simply fell off the end of the function, which is undefined
// behavior if the operator is ever called.
template <typename IndexType_, typename ValueType_>
ValuedCsrGraph<IndexType_, ValueType_>& ValuedCsrGraph<IndexType_, ValueType_>::operator=(const ValuedCsrGraph<IndexType_, ValueType_>& graph)
{
    return *this;
}

}
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/csr_graph.cpp
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "csr_graph.hxx"

namespace nvgraph
{

// Copy assignment is currently a stub and performs no member-wise copy.
// Returning *this keeps the non-void signature well-formed; the original
// body had no return statement, which is undefined behavior if the
// operator is ever called.
template <typename IndexType_>
CsrGraph<IndexType_>& CsrGraph<IndexType_>::operator=(const CsrGraph<IndexType_>& graph)
{
    return *this;
}

} // end namespace nvgraph
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/graph_extractor.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_concrete_visitors.hxx> namespace nvgraph { //------------------------- SubGraph Extraction: ---------------------- // CsrGraph<int>* extract_subgraph_by_vertices(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream) { return extract_from_vertex_subset<int, double>(graph, pV, n, stream); } MultiValuedCsrGraph<int, float>* extract_subgraph_by_vertices(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream) { return static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(extract_from_vertex_subset<int, float>(graph, pV, n, stream)); } MultiValuedCsrGraph<int, double>* extract_subgraph_by_vertices(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream) { return static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(extract_from_vertex_subset<int, double>(graph, pV, n, stream)); } CsrGraph<int>* extract_subgraph_by_edges(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream) { return extract_from_edge_subset<int, double>(graph, pV, n, stream); } MultiValuedCsrGraph<int, float>* extract_subgraph_by_edges(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream) { return static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(extract_from_edge_subset<int, float>(graph, pV, n, stream)); } MultiValuedCsrGraph<int, double>* extract_subgraph_by_edges(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream) { return static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(extract_from_edge_subset<int, double>(graph, pV, n, stream)); } }// end namespace nvgraph
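A hypothetical call site for the vertex-subset extractor above, included only as an illustration. The device vertex list, the stream, and the assumption that the caller owns (and deletes) the returned graph are placeholders; the nvgraph declarations from the file above are assumed to be in scope via the appropriate headers.

// Sketch: extract the subgraph induced by n_vertices vertex ids stored on the device.
void example_extract(nvgraph::CsrGraph<int>& graph,
                     int* d_vertices, size_t n_vertices,
                     cudaStream_t stream)
{
    nvgraph::CsrGraph<int>* sub =
        nvgraph::extract_subgraph_by_vertices(graph, d_vertices, n_vertices, stream);
    // ... use the induced subgraph ...
    delete sub;  // assumption: ownership of the returned pointer passes to the caller
}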
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/partition.cu
//#ifdef NVGRAPH_PARTITION /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "partition.hxx" #include <stdio.h> #include <math.h> #include <cuda.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/reduce.h> #include <thrust/transform.h> #include "nvgraph_error.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_cublas.hxx" #include "matrix.hxx" #include "lanczos.hxx" #include "kmeans.hxx" #include "debug_macros.h" #include "lobpcg.hxx" #include "sm_utils.h" //#define COLLECT_TIME_STATISTICS 1 //#undef COLLECT_TIME_STATISTICS #ifdef COLLECT_TIME_STATISTICS #include <stddef.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/sysinfo.h> #endif static double timer (void) { #ifdef COLLECT_TIME_STATISTICS struct timeval tv; cudaDeviceSynchronize(); gettimeofday(&tv, NULL); return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; #else return 0.0; #endif } namespace nvgraph { // ========================================================= // Useful macros // ========================================================= // Get index of matrix entry #define IDX(i,j,lda) ((i)+(j)*(lda)) // namespace { // /// Get string associated with NVGRAPH error flag // static // const char* nvgraphGetErrorString(NVGRAPH_ERROR e) { // switch(e) { // case NVGRAPH_OK: return "NVGRAPH_OK"; // case NVGRAPH_ERR_BAD_PARAMETERS: return "NVGRAPH_ERR_BAD_PARAMETERS"; // case NVGRAPH_ERR_UNKNOWN: return "NVGRAPH_ERR_UNKNOWN"; // case NVGRAPH_ERR_CUDA_FAILURE: return "NVGRAPH_ERR_CUDA_FAILURE"; // case NVGRAPH_ERR_THRUST_FAILURE: return "NVGRAPH_ERR_THRUST_FAILURE"; // case NVGRAPH_ERR_IO: return "NVGRAPH_ERR_IO"; // case NVGRAPH_ERR_NOT_IMPLEMENTED: return "NVGRAPH_ERR_NOT_IMPLEMENTED"; // case NVGRAPH_ERR_NO_MEMORY: return "NVGRAPH_ERR_NO_MEMORY"; // default: return "unknown NVGRAPH error"; // } // } // } template <typename IndexType_, typename ValueType_, bool Device_, bool print_transpose> static int print_matrix(IndexType_ m, IndexType_ n, ValueType_ * A, IndexType_ lda, const char *s){ IndexType_ i,j; ValueType_ * h_A; if (m > lda) { WARNING("print_matrix - invalid parameter (m > lda)"); return -1; } if (Device_) { h_A = (ValueType_ *)malloc(lda*n*sizeof(ValueType_)); if (!h_A) { WARNING("print_matrix - malloc failed"); return -1; } cudaMemcpy(h_A, A, lda*n*sizeof(ValueType_), cudaMemcpyDeviceToHost); cudaCheckError() } else { h_A = A; } printf("%s\n",s); if(print_transpose){ for (j=0; j<n; j++) { for (i=0; i<m; i++) { //assumption m<lda printf("%8.5f, ", h_A[i+j*lda]); } printf("\n"); } } else { for (i=0; i<m; i++) { //assumption m<lda for (j=0; j<n; j++) { printf("%8.5f, ", h_A[i+j*lda]); } printf("\n"); } } if (Device_) { if (h_A) free(h_A); } return 0; } template <typename IndexType_, typename ValueType_> static __global__ void scale_obs_kernel(IndexType_ m, IndexType_ n, ValueType_ *obs) { IndexType_ i,j,k,index,mm; ValueType_ alpha,v,last; bool valid; //ASSUMPTION: kernel is launched with either 2, 4, 8, 16 or 32 threads in x-dimension 
//compute alpha mm =(((m+blockDim.x-1)/blockDim.x)*blockDim.x); //m in multiple of blockDim.x alpha=0.0; //printf("[%d,%d,%d,%d] n=%d, li=%d, mn=%d \n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y, n, li, mn); for (j=threadIdx.y+blockIdx.y*blockDim.y; j<n; j+=blockDim.y*gridDim.y) { for (i=threadIdx.x; i<mm; i+=blockDim.x) { //check if the thread is valid valid = i<m; //get the value of the last thread last = utils::shfl(alpha, blockDim.x-1, blockDim.x); //if you are valid read the value from memory, otherwise set your value to 0 alpha = (valid) ? obs[i+j*m] : 0.0; alpha = alpha*alpha; //do prefix sum (of size warpSize=blockDim.x =< 32) for (k=1; k<blockDim.x; k*=2) { v = utils::shfl_up(alpha, k, blockDim.x); if (threadIdx.x >= k) alpha+=v; } //shift by last alpha+=last; } } //scale by alpha alpha = utils::shfl(alpha, blockDim.x-1, blockDim.x); alpha = std::sqrt(alpha); for (j=threadIdx.y+blockIdx.y*blockDim.y; j<n; j+=blockDim.y*gridDim.y) { for (i=threadIdx.x; i<m; i+=blockDim.x) { //blockDim.x=32 index = i+j*m; obs[index] = obs[index]/alpha; } } } template <typename IndexType_> IndexType_ next_pow2(IndexType_ n) { IndexType_ v; //Reference: //http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2Float v = n-1; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return v+1; } template <typename IndexType_, typename ValueType_> cudaError_t scale_obs(IndexType_ m, IndexType_ n, ValueType_ *obs) { IndexType_ p2m; dim3 nthreads, nblocks; //find next power of 2 p2m = next_pow2<IndexType_>(m); //setup launch configuration nthreads.x = max(2,min(p2m,32)); nthreads.y = 256/nthreads.x; nthreads.z = 1; nblocks.x = 1; nblocks.y = (n + nthreads.y - 1)/nthreads.y; nblocks.z = 1; //printf("m=%d(%d),n=%d,obs=%p, nthreads=(%d,%d,%d),nblocks=(%d,%d,%d)\n",m,p2m,n,obs,nthreads.x,nthreads.y,nthreads.z,nblocks.x,nblocks.y,nblocks.z); //launch scaling kernel (scale each column of obs by its norm) scale_obs_kernel<IndexType_,ValueType_><<<nblocks,nthreads>>>(m,n,obs); cudaCheckError(); return cudaSuccess; } // ========================================================= // Spectral partitioner // ========================================================= /// Compute spectral graph partition /** Compute partition for a weighted undirected graph. This * partition attempts to minimize the cost function: * Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition) * * @param G Weighted graph in CSR format * @param nParts Number of partitions. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter_lanczos Maximum number of Lanczos iterations. * @param restartIter_lanczos Maximum size of Lanczos system before * implicit restart. * @param tol_lanczos Convergence tolerance for Lanczos method. * @param maxIter_kmeans Maximum number of k-means iterations. * @param tol_kmeans Convergence tolerance for k-means algorithm. * @param parts (Output, device memory, n entries) Partition * assignments. * @param iters_lanczos On exit, number of Lanczos iterations * performed. * @param iters_kmeans On exit, number of k-means iterations * performed. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR partition( ValuedCsrGraph<IndexType_,ValueType_>& G, IndexType_ nParts, IndexType_ nEigVecs, IndexType_ maxIter_lanczos, IndexType_ restartIter_lanczos, ValueType_ tol_lanczos, IndexType_ maxIter_kmeans, ValueType_ tol_kmeans, IndexType_ * __restrict__ parts, Vector<ValueType_> &eigVals, Vector<ValueType_> &eigVecs, IndexType_ & iters_lanczos, IndexType_ & iters_kmeans) { // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(nParts < 1) { WARNING("invalid parameter (nParts<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter_lanczos < nEigVecs) { WARNING("invalid parameter (maxIter_lanczos<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter_lanczos < nEigVecs) { WARNING("invalid parameter (restartIter_lanczos<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol_lanczos < 0) { WARNING("invalid parameter (tol_lanczos<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter_kmeans < 0) { WARNING("invalid parameter (maxIter_kmeans<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol_kmeans < 0) { WARNING("invalid parameter (tol_kmeans<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ zero = 0; const ValueType_ one = 1; // Loop index IndexType_ i; // Matrix dimension IndexType_ n = G.get_num_vertices(); // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Matrices Matrix<IndexType_, ValueType_> * A; // Adjacency matrix Matrix<IndexType_, ValueType_> * L; // Laplacian matrix // Whether to perform full reorthogonalization in Lanczos bool reorthogonalize_lanczos = false; // k-means residual ValueType_ residual_kmeans; bool scale_eigevec_rows=SPECTRAL_USE_SCALING_OF_EIGVECS; //true; //false; double t1=0.0,t2=0.0,t_kmeans=0.0; // ------------------------------------------------------- // Spectral partitioner // ------------------------------------------------------- // Compute eigenvectors of Laplacian // Initialize Laplacian A = new CsrMatrix<IndexType_,ValueType_>(G); L = new LaplacianMatrix<IndexType_,ValueType_>(*A); // Compute smallest eigenvalues and eigenvectors CHECK_NVGRAPH(computeSmallestEigenvectors(*L, nEigVecs, maxIter_lanczos, restartIter_lanczos, tol_lanczos, reorthogonalize_lanczos, iters_lanczos, eigVals.raw(), eigVecs.raw())); //eigVals.dump(0, nEigVecs); //eigVecs.dump(0, nEigVecs); //eigVecs.dump(n, nEigVecs); //eigVecs.dump(2*n, nEigVecs); // Whiten eigenvector matrix for(i=0; i<nEigVecs; ++i) { ValueType_ mean, std; mean = thrust::reduce(thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i+1,n))); cudaCheckError(); mean /= n; thrust::transform(thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i+1,n)), thrust::make_constant_iterator(mean), thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::minus<ValueType_>()); cudaCheckError(); std = Cublas::nrm2(n, eigVecs.raw()+IDX(0,i,n), 1)/std::sqrt(static_cast<ValueType_>(n)); thrust::transform(thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i+1,n)), thrust::make_constant_iterator(std), 
thrust::device_pointer_cast(eigVecs.raw()+IDX(0,i,n)), thrust::divides<ValueType_>()); cudaCheckError(); } delete L; delete A; // Transpose eigenvector matrix // TODO: in-place transpose { Vector<ValueType_> work(nEigVecs*n, stream); Cublas::set_pointer_mode_host(); Cublas::geam(true, false, nEigVecs, n, &one, eigVecs.raw(), n, &zero, (ValueType_*) NULL, nEigVecs, work.raw(), nEigVecs); CHECK_CUDA(cudaMemcpyAsync(eigVecs.raw(), work.raw(), nEigVecs*n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); } // Clean up if (scale_eigevec_rows) { //WARNING: notice that at this point the matrix has already been transposed, so we are scaling columns scale_obs(nEigVecs,n,eigVecs.raw()); cudaCheckError() //print_matrix<IndexType_,ValueType_,true,false>(nEigVecs-ifirst,n,obs,nEigVecs-ifirst,"Scaled obs"); //print_matrix<IndexType_,ValueType_,true,true>(nEigVecs-ifirst,n,obs,nEigVecs-ifirst,"Scaled obs"); } t1=timer(); //eigVecs.dump(0, nEigVecs*n); // Find partition with k-means clustering CHECK_NVGRAPH(kmeans(n, nEigVecs, nParts, tol_kmeans, maxIter_kmeans, eigVecs.raw(), parts, residual_kmeans, iters_kmeans)); t2=timer(); t_kmeans+=t2-t1; #ifdef COLLECT_TIME_STATISTICS printf("time k-means %f\n",t_kmeans); #endif return NVGRAPH_OK; } // ========================================================= // Spectral partitioner // ========================================================= /// Compute spectral graph partition /** Compute partition for a weighted undirected graph. This * partition attempts to minimize the cost function: * Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition) * * @param G Weighted graph in CSR format * @param nParts Number of partitions. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter_lanczos Maximum number of Lanczos iterations. * @param restartIter_lanczos Maximum size of Lanczos system before * implicit restart. * @param tol_lanczos Convergence tolerance for Lanczos method. * @param maxIter_kmeans Maximum number of k-means iterations. * @param tol_kmeans Convergence tolerance for k-means algorithm. * @param parts (Output, device memory, n entries) Partition * assignments. * @param iters_lanczos On exit, number of Lanczos iterations * performed. * @param iters_kmeans On exit, number of k-means iterations * performed. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR partition_lobpcg( ValuedCsrGraph<IndexType_,ValueType_>& G, Matrix<IndexType_,ValueType_> * M, cusolverDnHandle_t cusolverHandle, IndexType_ nParts, IndexType_ nEigVecs, IndexType_ maxIter_lanczos, ValueType_ tol_lanczos, IndexType_ maxIter_kmeans, ValueType_ tol_kmeans, IndexType_ * __restrict__ parts, Vector<ValueType_> &eigVals, Vector<ValueType_> &eigVecs, IndexType_ & iters_lanczos, IndexType_ & iters_kmeans) { // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(nParts < 1) { WARNING("invalid parameter (nParts<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter_lanczos < nEigVecs) { WARNING("invalid parameter (maxIter_lanczos<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol_lanczos < 0) { WARNING("invalid parameter (tol_lanczos<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter_kmeans < 0) { WARNING("invalid parameter (maxIter_kmeans<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol_kmeans < 0) { WARNING("invalid parameter (tol_kmeans<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ zero = 0; const ValueType_ one = 1; // Loop index //IndexType_ i; // Matrix dimension IndexType_ n = G.get_num_vertices(); // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Matrices Matrix<IndexType_, ValueType_> * A; // Adjacency matrix Matrix<IndexType_, ValueType_> * L; // Laplacian matrix // k-means residual ValueType_ residual_kmeans; bool scale_eigevec_rows=SPECTRAL_USE_SCALING_OF_EIGVECS; //true; //false; double t1=0.0,t2=0.0,t_kmeans=0.0; // Compute eigenvectors of Laplacian // Initialize Laplacian A = new CsrMatrix<IndexType_,ValueType_>(G); L = new LaplacianMatrix<IndexType_,ValueType_>(*A); // LOBPCG use //bool use_lobpcg=SPECTRAL_USE_LOBPCG; //true; //false; bool use_preconditioning=SPECTRAL_USE_PRECONDITIONING; //true; //false; int lwork=0,lwork1=0,lwork2=0,lwork3=0,lwork_potrf=0,lwork_gesvd=0; double t_setup=0.0,t_solve=0.0; //ValueType_ * eigVals; //ValueType_ * work; ValueType_ * lanczosVecs=0; //ValueType_ * obs; //lanczosVecs are not allocated yet, but should not be touched in *_bufferSize routine CHECK_CUSOLVER(cusolverXpotrf_bufferSize(cusolverHandle, nEigVecs,lanczosVecs, nEigVecs,&lwork1)); CHECK_CUSOLVER(cusolverXpotrf_bufferSize(cusolverHandle,2*nEigVecs,lanczosVecs,2*nEigVecs,&lwork2)); CHECK_CUSOLVER(cusolverXpotrf_bufferSize(cusolverHandle,3*nEigVecs,lanczosVecs,3*nEigVecs,&lwork3)); lwork_potrf = max(lwork1,max(lwork2,lwork3)); CHECK_CUSOLVER(cusolverXgesvd_bufferSize(cusolverHandle, nEigVecs, nEigVecs,lanczosVecs,nEigVecs,lanczosVecs,nEigVecs,lanczosVecs,nEigVecs,&lwork1)); CHECK_CUSOLVER(cusolverXgesvd_bufferSize(cusolverHandle,2*nEigVecs,2*nEigVecs,lanczosVecs,nEigVecs,lanczosVecs,nEigVecs,lanczosVecs,nEigVecs,&lwork2)); CHECK_CUSOLVER(cusolverXgesvd_bufferSize(cusolverHandle,3*nEigVecs,3*nEigVecs,lanczosVecs,nEigVecs,lanczosVecs,nEigVecs,lanczosVecs,nEigVecs,&lwork3)); lwork_gesvd = max(lwork1,max(lwork2,lwork3)); lwork = max(lwork_potrf,lwork_gesvd); //allocating +2 to hold devInfo for cuSolver, which is of type int, using 2 rather than 1 just in case //sizeof(ValueType_) < sizeof(IntType_). 
Notice that this ratio will not be more than 2. //6*nEigVecs*n - Y=[X,R,P] and Z=[Q,T,V], where X and others are of size nEigVecs x n //36*nEigVecs*nEigVecs for G, H, HU and HVT, each of max size 3*nEigVecs x 3*nEigVecs //nEigVecs - nrmR //lwork - Workspace max Lwork value (for either potrf or gesvd) //2 - devInfo cudaMalloc(&lanczosVecs, (9*nEigVecs*n + 36*nEigVecs*nEigVecs + nEigVecs + lwork+2)*sizeof(ValueType_)); cudaCheckError(); //Setup preconditioner M for Laplacian L t1=timer(); if (use_preconditioning) { L->prec_setup(M); } t2=timer(); t_setup+=t2-t1; //Run the eigensolver (with preconditioning) t1=timer(); if(lobpcg_simplified(Cublas::get_handle(),cusolverHandle, n, nEigVecs, L, eigVecs.raw(), eigVals.raw(), maxIter_lanczos,tol_lanczos, lanczosVecs, //work array (on device) iters_lanczos) != 0) { WARNING("error in eigensolver"); return NVGRAPH_ERR_UNKNOWN; } t2=timer(); t_solve+=t2-t1; #ifdef COLLECT_TIME_STATISTICS printf("time eigsolver setup %f\n",t_setup); printf("time eigsolver solve %f\n",t_solve); #endif delete L; delete A; // Transpose eigenvector matrix // TODO: in-place transpose { Vector<ValueType_> work(nEigVecs*n, stream); Cublas::set_pointer_mode_host(); Cublas::geam(true, false, nEigVecs, n, &one, eigVecs.raw(), n, &zero, (ValueType_*) NULL, nEigVecs, work.raw(), nEigVecs); CHECK_CUDA(cudaMemcpyAsync(eigVecs.raw(), work.raw(), nEigVecs*n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); } if (scale_eigevec_rows) { //WARNING: notice that at this point the matrix has already been transposed, so we are scaling columns scale_obs(nEigVecs,n,eigVecs.raw()); cudaCheckError(); //print_matrix<IndexType_,ValueType_,true,false>(nEigVecs-ifirst,n,obs,nEigVecs-ifirst,"Scaled obs"); //print_matrix<IndexType_,ValueType_,true,true>(nEigVecs-ifirst,n,obs,nEigVecs-ifirst,"Scaled obs"); } t1=timer(); //eigVecs.dump(0, nEigVecs*n); // Find partition with k-means clustering CHECK_NVGRAPH(kmeans(n, nEigVecs, nParts, tol_kmeans, maxIter_kmeans, eigVecs.raw(), parts, residual_kmeans, iters_kmeans)); t2=timer(); t_kmeans+=t2-t1; #ifdef COLLECT_TIME_STATISTICS printf("time k-means %f\n",t_kmeans); #endif return NVGRAPH_OK; } // ========================================================= // Analysis of graph partition // ========================================================= namespace { /// Functor to generate indicator vectors /** For use in Thrust transform */ template <typename IndexType_, typename ValueType_> struct equal_to_i_op { const IndexType_ i; public: equal_to_i_op(IndexType_ _i) : i(_i) {} template<typename Tuple_> __host__ __device__ void operator()(Tuple_ t) { thrust::get<1>(t) = (thrust::get<0>(t) == i) ? (ValueType_) 1.0 : (ValueType_) 0.0; } }; } /// Compute cost function for partition /** This function determines the edges cut by a partition and a cost * function: * Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition) * Graph is assumed to be weighted and undirected. * * @param G Weighted graph in CSR format * @param nParts Number of partitions. * @param parts (Input, device memory, n entries) Partition * assignments. * @param edgeCut On exit, weight of edges cut by partition. * @param cost On exit, partition cost function. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR analyzePartition(ValuedCsrGraph<IndexType_,ValueType_> & G, IndexType_ nParts, const IndexType_ * __restrict__ parts, ValueType_ & edgeCut, ValueType_ & cost) { //using namespace thrust; // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Loop index IndexType_ i; // Matrix dimension IndexType_ n = G.get_num_vertices(); // Values for computing partition cost ValueType_ partEdgesCut, partSize; // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Device memory Vector<ValueType_> part_i(n, stream); Vector<ValueType_> Lx(n, stream); // Adjacency and Laplacian matrices Matrix<IndexType_, ValueType_> * A; Matrix<IndexType_, ValueType_> * L; // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Check that parameters are valid if(nParts < 1) { WARNING("invalid parameter (nParts<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Initialize cuBLAS Cublas::set_pointer_mode_host(); // Initialize Laplacian A = new CsrMatrix<IndexType_,ValueType_>(G); L = new LaplacianMatrix<IndexType_,ValueType_>(*A); // Initialize output cost = 0; edgeCut = 0; // Iterate through partitions for(i=0; i<nParts; ++i) { // Construct indicator vector for ith partition thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(parts), thrust::device_pointer_cast(part_i.raw()))), thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(parts+n), thrust::device_pointer_cast(part_i.raw()+n))), equal_to_i_op<IndexType_,ValueType_>(i)); cudaCheckError(); // Compute size of ith partition Cublas::dot(n, part_i.raw(), 1, part_i.raw(), 1, &partSize); partSize = round(partSize); if(partSize < 0.5) { WARNING("empty partition"); continue; } // Compute number of edges cut by ith partition L->mv(1, part_i.raw(), 0, Lx.raw()); Cublas::dot(n, Lx.raw(), 1, part_i.raw(), 1, &partEdgesCut); // Record results cost += partEdgesCut/partSize; edgeCut += partEdgesCut/2; } // Clean up and return delete L; delete A; return NVGRAPH_OK; } // ========================================================= // Explicit instantiation // ========================================================= template NVGRAPH_ERROR partition<int,float>( ValuedCsrGraph<int,float> & G, int nParts, int nEigVecs, int maxIter_lanczos, int restartIter_lanczos, float tol_lanczos, int maxIter_kmeans, float tol_kmeans, int * __restrict__ parts, Vector<float> &eigVals, Vector<float> &eigVecs, int & iters_lanczos, int & iters_kmeans); template NVGRAPH_ERROR partition<int,double>( ValuedCsrGraph<int,double> & G, int nParts, int nEigVecs, int maxIter_lanczos, int restartIter_lanczos, double tol_lanczos, int maxIter_kmeans, double tol_kmeans, int * __restrict__ parts, Vector<double> &eigVals, Vector<double> &eigVecs, int & iters_lanczos, int & iters_kmeans); template NVGRAPH_ERROR partition_lobpcg<int,float>(ValuedCsrGraph<int,float> & G, Matrix<int,float> * M, cusolverDnHandle_t cusolverHandle, int nParts, int nEigVecs, int maxIter_lanczos, float tol_lanczos, int maxIter_kmeans, float tol_kmeans, int * __restrict__ parts, Vector<float> &eigVals, Vector<float> &eigVecs, int & iters_lanczos, int & iters_kmeans); template NVGRAPH_ERROR partition_lobpcg<int,double>(ValuedCsrGraph<int,double> & G, Matrix<int,double> * M, cusolverDnHandle_t cusolverHandle, int nParts, int nEigVecs, int 
maxIter_lanczos, double tol_lanczos, int maxIter_kmeans, double tol_kmeans, int * __restrict__ parts, Vector<double> &eigVals, Vector<double> &eigVecs, int & iters_lanczos, int & iters_kmeans); template NVGRAPH_ERROR analyzePartition<int,float>(ValuedCsrGraph<int,float> & G, int nParts, const int * __restrict__ parts, float & edgeCut, float & cost); template NVGRAPH_ERROR analyzePartition<int,double>(ValuedCsrGraph<int,double> & G, int nParts, const int * __restrict__ parts, double & edgeCut, double & cost); } //#endif //NVGRAPH_PARTITION
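analyzePartition() uses s_k^T L s_k, with L the graph Laplacian, to obtain the weight of edges leaving partition k, and reports cost = sum_k (cut_k / |part_k|) and edgeCut = sum_k cut_k / 2. The dense host sketch below (illustration only, assuming a small symmetric weighted adjacency matrix) computes the same two quantities without the Laplacian machinery.

#include <vector>

void partition_cost_dense(const std::vector<std::vector<double>>& adj,
                          const std::vector<int>& part, int n_parts,
                          double& edge_cut, double& cost)
{
    const int n = static_cast<int>(adj.size());
    edge_cut = 0.0;
    cost     = 0.0;
    for (int k = 0; k < n_parts; ++k) {
        double cut_k = 0.0, size_k = 0.0;
        for (int i = 0; i < n; ++i) {
            if (part[i] != k) continue;
            size_k += 1.0;
            for (int j = 0; j < n; ++j)
                if (part[j] != k) cut_k += adj[i][j];  // weight crossing the boundary of part k
        }
        if (size_k < 0.5) continue;    // skip empty partitions, as the solver does
        cost     += cut_k / size_k;
        edge_cut += cut_k / 2.0;       // each cut edge is seen from both of its endpoints
    }
}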
rapidsai_public_repos/nvgraph/cpp/src/size2_selector.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <nvgraph_cusparse.hxx> #include <size2_selector.hxx> #include <common_selector.hxx> #include <async_event.hxx> #include <thrust/device_vector.h> #include <thrust/count.h> //count #include <thrust/sort.h> //sort #include <thrust/binary_search.h> //lower_bound #include <thrust/unique.h> //unique // This should be enabled #define EXPERIMENTAL_ITERATIVE_MATCHING namespace nvgraph { template <typename IndexType> void renumberAndCountAggregates(Vector<IndexType> &aggregates, const IndexType n, IndexType& num_aggregates) { // renumber aggregates Vector<IndexType> scratch(n+1); scratch.fill(0); thrust::device_ptr<IndexType> aggregates_thrust_dev_ptr(aggregates.raw()); thrust::device_ptr<IndexType> scratch_thrust_dev_ptr(scratch.raw()); // set scratch[aggregates[i]] = 1 thrust::fill(thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr), thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr + n), 1); //scratch.dump(0,scratch.get_size()); // do prefix sum on scratch thrust::exclusive_scan(scratch_thrust_dev_ptr, scratch_thrust_dev_ptr+n+1, scratch_thrust_dev_ptr); // scratch.dump(0,scratch.get_size()); // aggregates[i] = scratch[aggregates[i]] thrust::copy(thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr), thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr + n), aggregates_thrust_dev_ptr); cudaCheckError(); cudaMemcpy(&num_aggregates, &scratch.raw()[scratch.get_size()-1], sizeof(int), cudaMemcpyDefault); //num_aggregates = scratch.raw()[scratch.get_size()-1]; cudaCheckError(); } // ------------------ // Constructors // ------------------ template <typename IndexType, typename ValueType> Size2Selector<IndexType, ValueType>::Size2Selector() { //Using default vaues from AmgX m_deterministic = 1; m_stream=0; m_max_iterations = 15; m_numUnassigned_tol = 0.05; m_two_phase = 0; m_aggregation_edge_weight_component= 0; m_merge_singletons = 1; m_weight_formula = 0; m_similarity_metric = SCALED_BY_ROW_SUM; } // ------------------ // Methods // ------------------ // setAggregates for block_dia_csr_matrix_d format template <typename IndexType, typename ValueType> NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates_common_sqblocks(const ValuedCsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates) { const IndexType n = (int) A.get_num_vertices(); const IndexType nnz = (int) A.get_num_edges(); const IndexType *A_row_offsets_ptr = A.get_raw_row_offsets(); const IndexType *A_column_indices_ptr = A.get_raw_column_indices(); const ValueType *A_nonzero_values_ptr = A.get_raw_values(); // compute row indices Vector<IndexType> row_indices(nnz); Cusparse::csr2coo( n, nnz, A_row_offsets_ptr, row_indices.raw()); // note : amgx uses cusp for that const IndexType *A_row_indices_ptr = row_indices.raw(); //All vectors should be initialized to -1. 
aggregates.fill(-1); Vector<IndexType> strongest_neighbour(n); strongest_neighbour.fill(-1); Vector<IndexType> strongest_neighbour_1phase(n); strongest_neighbour_1phase.fill(-1); Vector<float> edge_weights(nnz); edge_weights.fill(-1); float *edge_weights_ptr = edge_weights.raw(); float *rand_edge_weights_ptr = NULL; cudaCheckError(); IndexType *strongest_neighbour_ptr = strongest_neighbour.raw(); IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw(); IndexType *aggregates_ptr = aggregates.raw(); const int threads_per_block = 256; const int max_grid_size = 256; const int num_blocks = min( max_grid_size, (n-1)/threads_per_block+ 1 ); const int num_blocks_V2 = min( max_grid_size, (nnz-1)/threads_per_block + 1); int bsize = 1; // AmgX legacy: we don't use block CSR matrices, this is just to specify that we run on regular matrices int numUnassigned = n; int numUnassigned_previous = numUnassigned; thrust::device_ptr<IndexType> aggregates_thrust_dev_ptr(aggregates_ptr); switch(m_similarity_metric) { case USER_PROVIDED : { //copy non wero values of A in edge_weights (float) convert_type<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(nnz, A_nonzero_values_ptr, edge_weights_ptr); cudaCheckError(); //edge_weights.dump(0,nnz); break; } case SCALED_BY_ROW_SUM : { // Compute the edge weights using .5*(A_ij+A_ji)/max(d(i),d(j)) where d(i) is the sum of outgoing edges of i Vector<ValueType> row_sum(n); const ValueType *A_row_sum_ptr = row_sum.raw(); Vector<ValueType> ones(n); ones.fill(1.0); ValueType alpha = 1.0, beta =0.0; Cusparse::csrmv(false, false, n, n, nnz,&alpha,A_nonzero_values_ptr, A_row_offsets_ptr, A_column_indices_ptr, ones.raw(),&beta, row_sum.raw()); cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType,ValueType,float>,cudaFuncCachePreferL1); computeEdgeWeights_simple<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_row_sum_ptr, A_nonzero_values_ptr, nnz, edge_weights_ptr, rand_edge_weights_ptr, n, this->m_weight_formula); cudaCheckError(); break; } case SCALED_BY_DIAGONAL : { // Compute the edge weights using AmgX formula (works only if there is a diagonal entry for each row) Vector<IndexType> diag_idx(n); const IndexType *A_dia_idx_ptr = diag_idx.raw(); computeDiagonalKernelCSR<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, A.get_raw_row_offsets(), A.get_raw_column_indices(), diag_idx.raw()); cudaCheckError(); cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType,ValueType,float>,cudaFuncCachePreferL1); computeEdgeWeightsBlockDiaCsr_V2<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, nnz, edge_weights_ptr, rand_edge_weights_ptr, n, bsize,this->m_aggregation_edge_weight_component, this->m_weight_formula); cudaCheckError(); break; } default: return NVGRAPH_ERR_BAD_PARAMETERS; } #ifdef EXPERIMENTAL_ITERATIVE_MATCHING // TODO (from amgx): allocate host pinned memory AsyncEvent *throttle_event = new AsyncEvent; throttle_event->create(); std::vector<IndexType> h_unagg_vec(1); Vector<IndexType> d_unagg_vec(1); int *unaggregated = &h_unagg_vec[0]; int *d_unaggregated = d_unagg_vec.raw(); #endif int icount, s = 1; { icount = 0; float *weights_ptr = edge_weights_ptr; do { if( !this->m_two_phase ) { // 1-phase handshaking findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, 
aggregates_ptr, strongest_neighbour_ptr, strongest_neighbour_ptr, bsize, 1, this->m_merge_singletons); cudaCheckError(); } else { // 2-phase handshaking findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, bsize, 1, this->m_merge_singletons); cudaCheckError(); // 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, bsize, 2, this->m_merge_singletons); cudaCheckError(); } // Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour matchEdges<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, strongest_neighbour_ptr); cudaCheckError(); #ifdef EXPERIMENTAL_ITERATIVE_MATCHING s = (icount & 1); if( s == 0 ) { // count unaggregated vertices cudaMemsetAsync(d_unaggregated, 0, sizeof(int), this->m_stream); countAggregates<IndexType,threads_per_block><<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, d_unaggregated); cudaCheckError(); cudaMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), cudaMemcpyDeviceToHost, this->m_stream); throttle_event->record(this->m_stream); cudaCheckError(); } else { throttle_event->sync(); numUnassigned_previous = numUnassigned; numUnassigned = *unaggregated; } #else cudaStreamSynchronize(this->m_stream); numUnassigned_previous = numUnassigned; numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n,-1); cudaCheckError(); #endif icount++; } while ( (s == 0) || !(numUnassigned==0 || icount > this->m_max_iterations || 1.0*numUnassigned/n < this->m_numUnassigned_tol || numUnassigned == numUnassigned_previous)); } //print //printf("icount=%i, numUnassiged=%d, numUnassigned_tol=%f\n", icount, numUnassigned, this->m_numUnassigned_tol); #ifdef EXPERIMENTAL_ITERATIVE_MATCHING delete throttle_event; #endif if( this->m_merge_singletons ) { // Merge remaining vertices with current aggregates if (!this->m_deterministic) { while (numUnassigned != 0) { mergeWithExistingAggregatesBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, n, aggregates_ptr, bsize,this->m_deterministic,(IndexType*) NULL); cudaCheckError(); numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n,-1); cudaCheckError(); } } else { Vector<int> aggregates_candidate(n); aggregates_candidate.fill(-1); while (numUnassigned != 0) { mergeWithExistingAggregatesBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, n, aggregates_ptr, bsize,this->m_deterministic,aggregates_candidate.raw()); cudaCheckError(); joinExistingAggregates<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, aggregates_candidate.raw()); cudaCheckError(); numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n,-1); cudaCheckError(); } } } else { //make singletons aggregateSingletons<<<num_blocks,threads_per_block,0,this->m_stream>>>( aggregates_ptr, n ); cudaCheckError(); } renumberAndCountAggregates(aggregates, n, num_aggregates); return NVGRAPH_OK; } template 
<typename IndexType, typename ValueType> NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates(const ValuedCsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates) { return setAggregates_common_sqblocks( A, aggregates, num_aggregates); } template class Size2Selector<int, float>; template class Size2Selector<int, double>; template void renumberAndCountAggregates <int> (Vector<int> &aggregates, const int n, int& num_aggregates); } //nvgraph
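A usage sketch (relying only on the signatures visible above; the graph construction, includes, and the wrapper function name are assumptions): one pass of the size-2 aggregator to coarsen a weighted CSR graph.

// Hedged sketch: coarsen a graph once with the default selector settings.
void coarsen_once(const nvgraph::ValuedCsrGraph<int, float>& A)
{
    const int n = static_cast<int>(A.get_num_vertices());

    nvgraph::Vector<int> aggregates(n);  // receives an aggregate id per vertex
    int num_aggregates = 0;

    nvgraph::Size2Selector<int, float> selector;  // defaults: deterministic, SCALED_BY_ROW_SUM metric
    NVGRAPH_ERROR status = selector.setAggregates(A, aggregates, num_aggregates);

    if (status == NVGRAPH_OK)
    {
        // aggregates[i] now lies in [0, num_aggregates); vertices sharing an id form one coarse vertex.
    }
}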
rapidsai_public_repos/nvgraph/cpp/src/nvgraph_error.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "nvgraph_error.hxx" namespace nvgraph { void nvgraph_default_output(const char *msg, int length) { #if defined(DEBUG) || defined(VERBOSE_DIAG) printf("%s", msg); #endif } NVGRAPH_output_callback nvgraph_output = nvgraph_default_output; NVGRAPH_output_callback error_output = nvgraph_default_output; //NVGRAPH_output_callback nvgraph_distributed_output = nvgraph_default_output;*/ // Timer struct cuda_timer::event_pair { cudaEvent_t start; cudaEvent_t end; }; cuda_timer::cuda_timer(): p(new event_pair()) { } void cuda_timer::start() { cudaEventCreate(&p->start); cudaEventCreate(&p->end); cudaEventRecord(p->start, 0); cudaCheckError(); } float cuda_timer::stop() { cudaEventRecord(p->end, 0); cudaEventSynchronize(p->end); float elapsed_time; cudaEventElapsedTime(&elapsed_time, p->start, p->end); cudaEventDestroy(p->start); cudaEventDestroy(p->end); cudaCheckError(); return elapsed_time; } } // end namespace nvgraph
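A short sketch of the cuda_timer helper defined above (the surrounding function and the work being timed are placeholders, not from the original file):

// Hedged sketch: wall-clock GPU timing with cuda_timer.
void time_some_work()
{
    nvgraph::cuda_timer timer;
    timer.start();                    // creates both events and records the start event on stream 0

    // ... launch the kernels or library calls to be timed here (placeholder) ...

    float elapsed_ms = timer.stop();  // records and synchronizes the end event, returns milliseconds
    (void)elapsed_ms;                 // e.g. log or accumulate the measurement
}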
rapidsai_public_repos/nvgraph/cpp/src/nvgraph_lapack.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <nvgraph_lapack.hxx> //#include <f2c.h> //#include <complex> //#define NVGRAPH_USE_LAPACK 1 namespace nvgraph { #define lapackCheckError(status) \ { \ if (status < 0) \ { \ std::stringstream ss; \ ss << "Lapack error: argument number " \ << -status << " had an illegal value."; \ FatalError(ss.str(), NVGRAPH_ERR_UNKNOWN); \ } \ else if (status > 0) \ FatalError("Lapack error: internal error.", \ NVGRAPH_ERR_UNKNOWN); \ } \ template <typename T> void Lapack<T>::check_lapack_enabled() { #ifndef NVGRAPH_USE_LAPACK FatalError("Error: LAPACK not enabled.", NVGRAPH_ERR_UNKNOWN); #endif } typedef enum{ CUSOLVER_STATUS_SUCCESS=0, CUSOLVER_STATUS_NOT_INITIALIZED=1, CUSOLVER_STATUS_ALLOC_FAILED=2, CUSOLVER_STATUS_INVALID_VALUE=3, CUSOLVER_STATUS_ARCH_MISMATCH=4, CUSOLVER_STATUS_MAPPING_ERROR=5, CUSOLVER_STATUS_EXECUTION_FAILED=6, CUSOLVER_STATUS_INTERNAL_ERROR=7, CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED=8, CUSOLVER_STATUS_NOT_SUPPORTED = 9, CUSOLVER_STATUS_ZERO_PIVOT=10, CUSOLVER_STATUS_INVALID_LICENSE=11 } cusolverStatus_t; typedef enum { CUBLAS_OP_N=0, CUBLAS_OP_T=1, CUBLAS_OP_C=2 } cublasOperation_t; namespace { // XGEMM //extern "C" //void sgemm_(const char *transa, const char *transb, // const int *m, const int *n, const int *k, // const float *alpha, const float *a, const int *lda, // const float *b, const int *ldb, // const float *beta, float *c, const int *ldc); //extern "C" //void dgemm_(const char *transa, const char *transb, // const int *m, const int *n, const int *k, // const double *alpha, const double *a, const int *lda, // const double *b, const int *ldb, // const double *beta, double *c, const int *ldc); extern "C" cusolverStatus_t cusolverDnSgemmHost( cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc); void lapack_gemm(const char transa, const char transb, int m, int n, int k, float alpha, const float *a, int lda, const float *b, int ldb, float beta, float *c, int ldc) { cublasOperation_t cublas_transa = (transa == 'N')? CUBLAS_OP_N : CUBLAS_OP_T ; cublasOperation_t cublas_transb = (transb == 'N')? CUBLAS_OP_N : CUBLAS_OP_T ; cusolverDnSgemmHost(cublas_transa, cublas_transb, m, n, k, &alpha, (float*)a, lda, (float*)b, ldb, &beta, c, ldc); } extern "C" cusolverStatus_t cusolverDnDgemmHost( cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc); void lapack_gemm(const signed char transa, const signed char transb, int m, int n, int k, double alpha, const double *a, int lda, const double *b, int ldb, double beta, double *c, int ldc) { cublasOperation_t cublas_transa = (transa == 'N')? CUBLAS_OP_N : CUBLAS_OP_T ; cublasOperation_t cublas_transb = (transb == 'N')? 
CUBLAS_OP_N : CUBLAS_OP_T ; cusolverDnDgemmHost(cublas_transa, cublas_transb, m, n, k, &alpha, (double*)a, lda, (double*)b, ldb, &beta, c, ldc); } // XSTERF //extern "C" //void ssterf_(const int *n, float *d, float *e, int *info); // //extern "C" //void dsterf_(const int *n, double *d, double *e, int *info); // extern "C" cusolverStatus_t cusolverDnSsterfHost( int n, float *d, float *e, int *info); void lapack_sterf(int n, float * d, float * e, int * info) { cusolverDnSsterfHost(n, d, e, info); } extern "C" cusolverStatus_t cusolverDnDsterfHost( int n, double *d, double *e, int *info); void lapack_sterf(int n, double * d, double * e, int * info) { cusolverDnDsterfHost(n, d, e, info); } // XSTEQR //extern "C" //void ssteqr_(const char *compz, const int *n, float *d, float *e, // float *z, const int *ldz, float *work, int * info); //extern "C" //void dsteqr_(const char *compz, const int *n, double *d, double *e, // double *z, const int *ldz, double *work, int *info); extern "C" cusolverStatus_t cusolverDnSsteqrHost( const signed char *compz, int n, float *d, float *e, float *z, int ldz, float *work, int *info); void lapack_steqr(const signed char compz, int n, float * d, float * e, float * z, int ldz, float * work, int * info) { cusolverDnSsteqrHost(&compz, n, d, e, z, ldz, work, info); } extern "C" cusolverStatus_t cusolverDnDsteqrHost( const signed char *compz, int n, double *d, double *e, double *z, int ldz, double *work, int *info); void lapack_steqr(const signed char compz, int n, double * d, double * e, double * z, int ldz, double * work, int * info) { cusolverDnDsteqrHost(&compz, n, d, e, z, ldz, work, info); } #ifdef NVGRAPH_USE_LAPACK extern "C" void sgeqrf_(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); extern "C" void dgeqrf_(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); //extern "C" //void cgeqrf_(int *m, int *n, std::complex<float> *a, int *lda, std::complex<float> *tau, std::complex<float> *work, int *lwork, int *info); //extern "C" //void zgeqrf_(int *m, int *n, std::complex<double> *a, int *lda, std::complex<double> *tau, std::complex<double> *work, int *lwork, int *info); void lapack_geqrf(int m, int n, float *a, int lda, float *tau, float *work, int *lwork, int *info) { sgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); } void lapack_geqrf(int m, int n, double *a, int lda, double *tau, double *work, int *lwork, int *info) { dgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); } //void lapack_geqrf(int m, int n, std::complex<float> *a, int lda, std::complex<float> *tau, std::complex<float> *work, int *lwork, int *info) //{ // cgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); //} //void lapack_geqrf(int m, int n, std::complex<double> *a, int lda, std::complex<double> *tau, std::complex<double> *work, int *lwork, int *info) //{ // zgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); //} extern "C" void sormqr_ (char* side, char* trans, int *m, int *n, int *k, float *a, int *lda, const float *tau, float* c, int *ldc, float *work, int *lwork, int *info); extern "C" void dormqr_(char* side, char* trans, int *m, int *n, int *k, double *a, int *lda, const double *tau, double* c, int *ldc, double *work, int *lwork, int *info); //extern "C" //void cunmqr_ (char* side, char* trans, int *m, int *n, int *k, std::complex<float> *a, int *lda, const std::complex<float> *tau, std::complex<float>* c, int *ldc, std::complex<float> *work, int *lwork, int *info); //extern "C" //void zunmqr_(char* side, char* trans, int *m, 
int *n, int *k, std::complex<double> *a, int *lda, const std::complex<double> *tau, std::complex<double>* c, int *ldc, std::complex<double> *work, int *lwork, int *info); void lapack_ormqr(char side, char trans, int m, int n, int k, float *a, int lda, float *tau, float* c, int ldc, float *work, int *lwork, int *info) { sormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); } void lapack_ormqr(char side, char trans, int m, int n, int k, double *a, int lda, double *tau, double* c, int ldc, double *work, int *lwork, int *info) { dormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); } //void lapack_unmqr(char side, char trans, int m, int n, int k, std::complex<float> *a, int lda, std::complex<float> *tau, std::complex<float>* c, int ldc, std::complex<float> *work, int *lwork, int *info) //{ // cunmqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); //} //void lapack_unmqr(char side, char trans, int m, int n, int k, std::complex<double> *a, int lda, std::complex<double> *tau, std::complex<double>* c, int ldc, std::complex<double> *work, int *lwork, int *info) //{ // zunmqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); //} // extern "C" // void sorgqr_ ( int* m, int* n, int* k, float* a, int* lda, const float* tau, float* work, int* lwork, int *info ); // extern "C" // void dorgqr_ ( int* m, int* n, int* k, double* a, int* lda, const double* tau, double* work, int* lwork, int *info ); // // void lapack_orgqr( int m, int n, int k, float* a, int lda, const float* tau, float* work, int *lwork, int *info) // { // sorgqr_(&m, &n, &k, a, &lda, tau, work, lwork, info); // } // void lapack_orgqr( int m, int n, int k, double* a, int lda, const double* tau, double* work, int* lwork, int *info ) // { // dorgqr_(&m, &n, &k, a, &lda, tau, work, lwork, info); // } //int lapack_hseqr_dispatch(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // double *h, int* ldh, double *wr, double *wi, double *z, // int*ldz, double *work, int *lwork, int *info) //{ // return dhseqr_(jobvl, jobvr, n, ilo, ihi, h, ldh, wr, wi, z, ldz, work, lwork, info); //} // //int lapack_hseqr_dispatch(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // float *h, int* ldh, float *wr, float *wi, float *z, // int*ldz, float *work, int *lwork, int *info) //{ // return shseqr_(jobvl, jobvr, n, ilo, ihi, h, ldh, wr, wi, z, ldz, work, lwork, info); //} // XGEEV extern "C" int dgeev_(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info); extern "C" int sgeev_(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info); //extern "C" //int dhseqr_(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // double *h, int* ldh, double *wr, double *wi, double *z, // int*ldz, double *work, int *lwork, int *info); //extern "C" //int shseqr_(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // float *h, int* ldh, float *wr, float *wi, float *z, // int*ldz, float *work, int *lwork, int *info); // int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info) { return dgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info); } int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *wr, 
float *wi, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info) { return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info); } // real eigenvalues template <typename T> void lapack_geev(T* A, T* eigenvalues, int dim, int lda) { char job = 'N'; T* WI = new T[dim]; int ldv = 1; T* vl = 0; int work_size = 6 * dim; T* work = new T[work_size]; int info; lapack_geev_dispatch(&job, &job, &dim, A, &lda, eigenvalues, WI, vl, &ldv, vl, &ldv, work, &work_size, &info); lapackCheckError(info); delete [] WI; delete [] work; } //real eigenpairs template <typename T> void lapack_geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr) { char jobvl = 'N'; char jobvr = 'V'; T* WI = new T[dim]; int work_size = 6 * dim; T* vl = 0; int ldvl = 1; T* work = new T[work_size]; int info; lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues, WI, vl, &ldvl, eigenvectors, &ldvr, work, &work_size, &info); lapackCheckError(info); delete [] WI; delete [] work; } //complex eigenpairs template <typename T> void lapack_geev(T* A, T* eigenvalues_r, T* eigenvalues_i, T* eigenvectors_r, T* eigenvectors_i, int dim, int lda, int ldvr) { char jobvl = 'N'; char jobvr = 'V'; int work_size = 8 * dim; int ldvl = 1; T* work = new T[work_size]; int info; lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues_r, eigenvalues_i, 0, &ldvl, eigenvectors_r, &ldvr, work, &work_size, &info); lapackCheckError(info); delete [] work; } //template <typename T> //void lapack_hseqr(T* Q, T* H, T* eigenvalues, int dim, int ldh, int ldq) //{ // char job = 'S'; // S compute eigenvalues and the Schur form T. On entry, the upper Hessenberg matrix H. // // On exit H contains the upper quasi-triangular matrix T from the Schur decomposition // char jobvr = 'V'; //Take Q on entry, and the product Q*Z is returned. // //ILO and IHI are normally set by a previous call to DGEBAL, Otherwise ILO and IHI should be set to 1 and N // int ilo = 1; // int ihi = dim; // T* WI = new T[dim]; // int ldv = 1; // T* vl = 0; // int work_size = 11 * dim; //LWORK as large as 11*N may be required for optimal performance. It is CPU memory and the matrix is assumed to be small // T* work = new T[work_size]; // int info; // lapack_hseqr_dispatch(&job, &jobvr, &dim, &ilo, &ihi, H, &ldh, eigenvalues, WI, Q, &ldq, work, &work_size, &info); // lapackCheckError(info); // delete [] WI; // delete [] work; //} #endif } // end anonymous namespace template <typename T> void Lapack< T >::gemm(bool transa, bool transb, int m, int n, int k, T alpha, const T * A, int lda, const T * B, int ldb, T beta, T * C, int ldc) { //check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK const char transA_char = transa ? 'T' : 'N'; const char transB_char = transb ? 
'T' : 'N'; lapack_gemm(transA_char, transB_char, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); //#endif } template <typename T> void Lapack< T >::sterf(int n, T * d, T * e) { // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK int info; lapack_sterf(n, d, e, &info); lapackCheckError(info); //#endif } template <typename T> void Lapack< T >::steqr(char compz, int n, T * d, T * e, T * z, int ldz, T * work) { // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK int info; lapack_steqr(compz, n, d, e, z, ldz, work, &info); lapackCheckError(info); //#endif } template <typename T> void Lapack< T >::geqrf(int m, int n, T *a, int lda, T *tau, T *work, int *lwork) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK int info; lapack_geqrf(m, n, a, lda, tau, work, lwork, &info); lapackCheckError(info); #endif } template <typename T> void Lapack< T >::ormqr(bool right_side, bool transq, int m, int n, int k, T *a, int lda, T *tau, T *c, int ldc, T *work, int *lwork) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK char side = right_side ? 'R' : 'L'; char trans = transq ? 'T' : 'N'; int info; lapack_ormqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, &info); lapackCheckError(info); #endif } //template <typename T> //void Lapack< T >::unmqr(bool right_side, bool transq, int m, int n, int k, T *a, int lda, T *tau, T *c, int ldc, T *work, int *lwork) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // char side = right_side ? 'R' : 'L'; // char trans = transq ? 'T' : 'N'; // int info; // lapack_unmqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, &info); // lapackCheckError(info); // #endif //} //template <typename T> //void Lapack< T >::orgqr( int m, int n, int k, T* a, int lda, const T* tau, T* work, int* lwork) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // int info; // lapack_orgqr(m, n, k, a, lda, tau, work, lwork, &info); // lapackCheckError(info); // #endif //} //template <typename T> //void Lapack< T >::qrf(int n, int k, T *H, T *C, T *Q, T *R) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // // int m = n, k = n, lda=n, lwork=2*n, info; // // lapack_geqrf(m, n, H, lda, C, work, lwork, &info); // // lapackCheckError(info); // // lapack_ormqr(m, n, k, H, lda, tau, c, ldc, work, lwork, &info); // // lapackCheckError(info); // #endif //} //real eigenvalues template <typename T> void Lapack< T >::geev(T* A, T* eigenvalues, int dim, int lda) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues, dim, lda); #endif } //real eigenpairs template <typename T> void Lapack< T >::geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues, eigenvectors, dim, lda, ldvr); #endif } //complex eigenpairs template <typename T> void Lapack< T >::geev(T* A, T* eigenvalues_r, T* eigenvalues_i, T* eigenvectors_r, T* eigenvectors_i, int dim, int lda, int ldvr) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues_r, eigenvalues_i, eigenvectors_r, eigenvectors_i, dim, lda, ldvr); #endif } //template <typename T> //void Lapack< T >::hseqr(T* Q, T* H, T* eigenvalues,T* eigenvectors, int dim, int ldh, int ldq) //{ // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK // lapack_hseqr(Q, H, eigenvalues, dim, ldh, ldq); //#endif //} // Explicit instantiation template void Lapack<float>::check_lapack_enabled(); template void Lapack<float>::gemm(bool transa, bool transb,int m, int n, int k,float alpha, const float * A, int 
lda, const float * B, int ldb, float beta, float * C, int ldc); template void Lapack<float>::sterf(int n, float * d, float * e); template void Lapack<float>::geev (float* A, float* eigenvalues, float* eigenvectors, int dim, int lda, int ldvr); template void Lapack<float>::geev (float* A, float* eigenvalues_r, float* eigenvalues_i, float* eigenvectors_r, float* eigenvectors_i, int dim, int lda, int ldvr); //template void Lapack<float>::hseqr(float* Q, float* H, float* eigenvalues, float* eigenvectors, int dim, int ldh, int ldq); template void Lapack<float>::steqr(char compz, int n, float * d, float * e, float * z, int ldz, float * work); template void Lapack<float>::geqrf(int m, int n, float *a, int lda, float *tau, float *work, int *lwork); template void Lapack<float>::ormqr(bool right_side, bool transq, int m, int n, int k, float *a, int lda, float *tau, float *c, int ldc, float *work, int *lwork); //template void Lapack<float>::orgqr(int m, int n, int k, float* a, int lda, const float* tau, float* work, int* lwork); template void Lapack<double>::check_lapack_enabled(); template void Lapack<double>::gemm(bool transa, bool transb, int m, int n, int k, double alpha, const double * A, int lda, const double * B, int ldb, double beta, double * C, int ldc); template void Lapack<double>::sterf(int n, double * d, double * e); template void Lapack<double>::geev (double* A, double* eigenvalues, double* eigenvectors, int dim, int lda, int ldvr); template void Lapack<double>::geev (double* A, double* eigenvalues_r, double* eigenvalues_i, double* eigenvectors_r, double* eigenvectors_i, int dim, int lda, int ldvr); //template void Lapack<double>::hseqr(double* Q, double* H, double* eigenvalues, double* eigenvectors, int dim, int ldh, int ldq); template void Lapack<double>::steqr(char compz, int n, double * d, double * e, double * z, int ldz, double * work); template void Lapack<double>::geqrf(int m, int n, double *a, int lda, double *tau, double *work, int *lwork); template void Lapack<double>::ormqr(bool right_side, bool transq, int m, int n, int k, double *a, int lda, double *tau, double *c, int ldc, double *work, int *lwork); //template void Lapack<double>::orgqr(int m, int n, int k, double* a, int lda, const double* tau, double* work, int* lwork); //template void Lapack<std::complex<float> >::geqrf(int m, int n, std::complex<float> *a, int lda, std::complex<float> *tau, std::complex<float> *work, int *lwork); //template void Lapack<std::complex<double> >::geqrf(int m, int n, std::complex<double> *a, int lda, std::complex<double> *tau, std::complex<double> *work, int *lwork); //template void Lapack<std::complex<float> >::unmqr(bool right_side, bool transq, int m, int n, int k, std::complex<float> *a, int lda, std::complex<float> *tau, std::complex<float> *c, int ldc, std::complex<float> *work, int *lwork); //template void Lapack<std::complex<double> >::unmqr(bool right_side, bool transq, int m, int n, int k, std::complex<double> *a, int lda, std::complex<double> *tau, std::complex<double> *c, int ldc, std::complex<double> *work, int *lwork); } // end namespace nvgraph
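A small host-side sketch (the example matrix and function name are mine, not from the sources): eigenvalues of a symmetric tridiagonal matrix via Lapack<double>::sterf, which the file above routes to cusolverDnDsterfHost.

// Hedged sketch: eigenvalues of the 3x3 tridiagonal matrix tridiag(-1, 2, -1).
void tridiagonal_eigenvalues_example()
{
    const int n = 3;
    double d[n]     = { 2.0, 2.0, 2.0 };  // main diagonal
    double e[n - 1] = { -1.0, -1.0 };     // sub/super-diagonal (length n-1)

    // On exit, d holds the eigenvalues in ascending order: about 0.586, 2.0, 3.414.
    nvgraph::Lapack<double>::sterf(n, d, e);
}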
rapidsai_public_repos/nvgraph/cpp/src/nvgraph_cusparse.cpp
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <nvgraph_cusparse.hxx> namespace nvgraph { cusparseHandle_t Cusparse::m_handle = 0; namespace { cusparseStatus_t cusparse_csrmv( cusparseHandle_t handle, cusparseOperation_t trans, int m, int n, int nnz, const float *alpha, const cusparseMatDescr_t descr, const float *csrVal, const int *csrRowPtr, const int *csrColInd, const float *x, const float *beta, float *y) { return cusparseScsrmv(handle, trans, m, n, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, beta, y); } cusparseStatus_t cusparse_csrmv( cusparseHandle_t handle, cusparseOperation_t trans, int m, int n, int nnz, const double *alpha, const cusparseMatDescr_t descr, const double *csrVal, const int *csrRowPtr, const int *csrColInd, const double *x, const double *beta, double *y) { return cusparseDcsrmv(handle, trans, m, n, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, beta, y); } cusparseStatus_t cusparse_csrmm(cusparseHandle_t handle, cusparseOperation_t trans, int m, int n, int k, int nnz, const float *alpha, const cusparseMatDescr_t descr, const float *csrVal, const int *csrRowPtr, const int *csrColInd, const float *x, const int ldx, const float *beta, float *y, const int ldy) { return cusparseScsrmm(handle, trans, m, n, k, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, ldx, beta, y, ldy); } cusparseStatus_t cusparse_csrmm( cusparseHandle_t handle, cusparseOperation_t trans, int m, int n, int k, int nnz, const double *alpha, const cusparseMatDescr_t descr, const double *csrVal, const int *csrRowPtr, const int *csrColInd, const double *x, const int ldx, const double *beta, double *y, const int ldy) { return cusparseDcsrmm(handle, trans, m, n, k, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, ldx, beta, y, ldy); } }// end anonymous namespace. // Set pointer mode void Cusparse::set_pointer_mode_device() { cusparseHandle_t handle = Cusparse::get_handle(); cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_DEVICE); } void Cusparse::set_pointer_mode_host() { cusparseHandle_t handle = Cusparse::get_handle(); cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST); } template <typename IndexType_, typename ValueType_> void Cusparse::csrmv( const bool transposed, const bool sym, const int m, const int n, const int nnz, const ValueType_* alpha, const ValueType_* csrVal, const IndexType_ *csrRowPtr, const IndexType_ *csrColInd, const ValueType_* x, const ValueType_* beta, ValueType_* y) { cusparseHandle_t handle = Cusparse::get_handle(); cusparseOperation_t trans = transposed ? 
CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE; cusparseMatDescr_t descr=0; CHECK_CUSPARSE(cusparseCreateMatDescr(&descr)); // we should move that somewhere else if (sym) { CHECK_CUSPARSE(cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_SYMMETRIC)); } else { CHECK_CUSPARSE(cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL)); } CHECK_CUSPARSE(cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(cusparse_csrmv(handle, trans , m, n, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, beta, y)); CHECK_CUSPARSE(cusparseDestroyMatDescr(descr)); // we should move that somewhere else } template <typename IndexType_, typename ValueType_> void Cusparse::csrmv( const bool transposed, const bool sym, const ValueType_* alpha, const ValuedCsrGraph<IndexType_, ValueType_>& G, const Vector<ValueType_>& x, const ValueType_* beta, Vector<ValueType_>& y ) { cusparseHandle_t handle = Cusparse::get_handle(); cusparseOperation_t trans = transposed ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE; cusparseMatDescr_t descr=0; CHECK_CUSPARSE(cusparseCreateMatDescr(&descr)); // we should move that somewhere else if (sym) { CHECK_CUSPARSE(cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_SYMMETRIC)); } else { CHECK_CUSPARSE(cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL)); } int n = G.get_num_vertices(); int nnz = G.get_num_edges(); CHECK_CUSPARSE(cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(cusparse_csrmv(handle, trans , n, n, nnz, alpha, descr, (ValueType_*)G.get_raw_values(), (IndexType_*)G.get_raw_row_offsets(),(IndexType_*)G.get_raw_column_indices(), (ValueType_*)x.raw(), beta, (ValueType_*)y.raw())); CHECK_CUSPARSE(cusparseDestroyMatDescr(descr)); // we should move that somewhere else } template void Cusparse::csrmv( const bool transposed, const bool sym, const int m, const int n, const int nnz, const double* alpha, const double* csrVal, const int *csrRowPtr, const int *csrColInd, const double* x, const double* beta, double* y); template void Cusparse::csrmv( const bool transposed, const bool sym, const int m, const int n, const int nnz, const float* alpha, const float* csrVal, const int *csrRowPtr, const int *csrColInd, const float* x, const float* beta, float* y); /* template void Cusparse::csrmv( const bool transposed, const bool sym, const double* alpha, const ValuedCsrGraph<int, double>& G, const Vector<double>& x, const double* beta, Vector<double>& y ); template void Cusparse::csrmv( const bool transposed, const bool sym, const float* alpha, const ValuedCsrGraph<int, float>& G, const Vector<float>& x, const float* beta, Vector<float>& y ); */ template <typename IndexType_, typename ValueType_> void Cusparse::csrmm(const bool transposed, const bool sym, const int m, const int n, const int k, const int nnz, const ValueType_* alpha, const ValueType_* csrVal, const IndexType_* csrRowPtr, const IndexType_* csrColInd, const ValueType_* x, const int ldx, const ValueType_* beta, ValueType_* y, const int ldy) { cusparseHandle_t handle = Cusparse::get_handle(); cusparseOperation_t trans = transposed ? 
CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE; cusparseMatDescr_t descr=0; CHECK_CUSPARSE(cusparseCreateMatDescr(&descr)); // we should move that somewhere else if (sym) { CHECK_CUSPARSE(cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_SYMMETRIC)); } else { CHECK_CUSPARSE(cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL)); } CHECK_CUSPARSE(cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(cusparse_csrmm(handle, trans, m, n, k, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, ldx, beta, y, ldy)); CHECK_CUSPARSE(cusparseDestroyMatDescr(descr)); // we should move that somewhere else } template void Cusparse::csrmm(const bool transposed, const bool sym, const int m, const int n, const int k, const int nnz, const double* alpha, const double* csrVal, const int* csrRowPtr, const int* csrColInd, const double* x, const int ldx, const double* beta, double* y, const int ldy); template void Cusparse::csrmm(const bool transposed, const bool sym, const int m, const int n, const int k, const int nnz, const float* alpha, const float* csrVal, const int* csrRowPtr, const int* csrColInd, const float* x, const int ldx, const float* beta, float* y, const int ldy); //template <typename IndexType_, typename ValueType_> void Cusparse::csr2coo( const int n, const int nnz, const int *csrRowPtr, int *cooRowInd) { cusparseHandle_t handle = Cusparse::get_handle(); cusparseIndexBase_t idxBase = CUSPARSE_INDEX_BASE_ZERO ; CHECK_CUSPARSE(cusparseXcsr2coo(handle, csrRowPtr, nnz, n, cooRowInd, idxBase)); } } // end namespace nvgraph
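A sketch of the most common call path above, y = A*x for a square CSR matrix via Cusparse::csrmv (the device buffers and the wrapper function name are assumptions; they are expected to be allocated and filled elsewhere):

// Hedged sketch: sparse matrix-vector product through the Cusparse wrapper.
void spmv_example(int n, int nnz,
                  const double* csr_val_d,
                  const int*    csr_row_ptr_d,
                  const int*    csr_col_ind_d,
                  const double* x_d,
                  double*       y_d)
{
    double alpha = 1.0, beta = 0.0;              // host scalars
    nvgraph::Cusparse::set_pointer_mode_host();  // alpha/beta are read from host memory

    nvgraph::Cusparse::csrmv(false,              // transposed
                             false,              // sym: use the general (non-symmetric) descriptor
                             n, n, nnz,
                             &alpha, csr_val_d, csr_row_ptr_d, csr_col_ind_d,
                             x_d, &beta, y_d);
}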
rapidsai_public_repos/nvgraph/cpp/src/arnoldi.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <iomanip> #include <utility> #include <curand.h> #include "valued_csr_graph.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_vector_kernels.hxx" #include "nvgraph_cusparse.hxx" #include "nvgraph_cublas.hxx" #include "nvgraph_lapack.hxx" #include "nvgraph_error.hxx" #include "pagerank_kernels.hxx" #include "arnoldi.hxx" #include "nvgraph_csrmv.hxx" #include "matrix.hxx" #include "debug_macros.h" #ifdef DEBUG #define IRAM_VERBOSE // #define IRAM_DEBUG #endif namespace nvgraph { template <typename IndexType_, typename ValueType_> ImplicitArnoldi<IndexType_, ValueType_>::ImplicitArnoldi(const ValuedCsrGraph <IndexType, ValueType>& A) :m_A(A), m_markov(false), m_laplacian(false), m_tolerance(1.0E-12), m_iterations(0), m_dirty_bit(false), m_max_iter(500), has_init_guess(false) { // initialize cuda libs outside of the solve (this is slow) // cusparseHandle_t t1 = Cusparse::get_handle(); // cublasHandle_t t2 = Cublas::get_handle(); // compiler is complainig, unused variables Cusparse::get_handle(); Cublas::get_handle(); } template <typename IndexType_, typename ValueType_> ImplicitArnoldi<IndexType_, ValueType_>::ImplicitArnoldi(const ValuedCsrGraph <IndexType, ValueType>& A, int parts) :m_A(A), m_parts(parts), m_laplacian(true), m_markov(false), m_tolerance(1.0E-9), m_iterations(0), m_dirty_bit(false), m_max_iter(500), has_init_guess(false) { // initialize cuda libs outside of the solve (this is slow) // cusparseHandle_t t1 = Cusparse::get_handle(); // cublasHandle_t t2 = Cublas::get_handle(); // compiler is complainig, unused variables Cusparse::get_handle(); Cublas::get_handle(); } template <typename IndexType_, typename ValueType_> ImplicitArnoldi<IndexType_, ValueType_>::ImplicitArnoldi(const ValuedCsrGraph <IndexType, ValueType>& A, Vector<ValueType>& dangling_nodes, const float tolerance, const int max_iter, ValueType alpha) :m_A(A), m_a(dangling_nodes), m_damping(alpha), m_markov(true), m_laplacian(false), m_tolerance(tolerance), m_iterations(0), m_dirty_bit(false), m_max_iter(max_iter), has_init_guess(false) { // initialize cuda libs outside of the solve (this is slow) // cusparseHandle_t t1 = Cusparse::get_handle(); // cublasHandle_t t2 = Cublas::get_handle(); // compiler is complainig, unused variables Cusparse::get_handle(); Cublas::get_handle(); } template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR ImplicitArnoldi<IndexType_, ValueType_>::solve(const int restart_it, const int nEigVals, Vector<ValueType>& initial_guess, Vector<ValueType>& eigVals, Vector<ValueType>& eigVecs, const int nested_subspaces_freq) { //try { #ifdef IRAM_VERBOSE std::stringstream ss; ss.str(std::string()); size_t used_mem, free_mem, total_mem; ss <<" ------------------ImplicitArnoldi------------------"<< std::endl; ss <<" --------------------------------------------"<< std::endl; ss << std::setw(10) << "Iteration" << std::setw(20) << " Mem Usage (MB)" << std::setw(15) << "Residual" << 
std::endl; ss <<" --------------------------------------------"<< std::endl; COUT()<<ss.str(); // start timer cuda_timer timer; timer.start(); #endif m_nested_subspaces_freq = nested_subspaces_freq; setup(initial_guess, restart_it, nEigVals); m_eigenvectors = eigVecs; bool converged = false; int i = 0; // we can print stats after setup to have the initial residual #ifdef IRAM_VERBOSE ss.str(std::string()); cnmemMemGetInfo(&free_mem, &total_mem, NULL); used_mem=total_mem-free_mem; ss << std::setw(10) << i ; ss.precision(3); ss << std::setw(20) << std::fixed << used_mem/1024.0/1024.0; ss << std::setw(15) << std::scientific << m_residual; if (m_miramns) ss << " (Krylov size: " << m_select << ")"; ss << std::endl; COUT()<<ss.str(); #endif while (!converged && i< m_max_iter) { // re-add the extra eigenvalue in case QR step changed it. m_n_eigenvalues = m_nr_eigenvalues+1; converged = solve_it(); i++; #ifdef IRAM_VERBOSE ss.str(std::string()); cnmemMemGetInfo(&free_mem, &total_mem, NULL); used_mem=total_mem-free_mem; ss << std::setw(10) << i ; ss.precision(3); ss << std::setw(20) << std::fixed << used_mem/1024.0/1024.0; ss << std::setw(15) << std::scientific << m_residual; if (m_miramns) ss << " (Krylov size: " << m_select << ")"; ss << std::endl; COUT()<<ss.str(); #endif } m_iterations = i; if (!m_miramns) { if (m_laplacian) { SR(m_krylov_size); } else if (m_markov) { LR(m_select); } else { LM(m_krylov_size); } } compute_eigenvectors(); cudaMemcpyAsync(eigVals.raw(), &m_ritz_eigenvalues[0], (size_t)(m_nr_eigenvalues*sizeof(m_ritz_eigenvalues[0])), cudaMemcpyHostToDevice); cudaCheckError(); #ifdef IRAM_VERBOSE COUT() <<" --------------------------------------------"<< std::endl; //stop timer COUT() <<" Total Time : "<< timer.stop() << "ms"<<std::endl; COUT() <<" --------------------------------------------"<< std::endl; //for(int i = 0; i<m_nr_eigenvalues; i++) //{ // COUT() << m_ritz_eigenvalues[i]; // if (m_ritz_eigenvalues_i[i]) // COUT() << " " <<m_ritz_eigenvalues_i[i]<<std::endl; // else // COUT() <<std::endl; //} #endif // } catch (const std::exception &exc) {std::cout << exc.what();} // x = m_x; // sometime there is a mixup between pointers, need to investigate that. 
return NVGRAPH_OK; } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::setup(Vector<ValueType>& initial_guess, const int restart_it, const int nEigVals) { m_krylov_size = restart_it; m_select = m_krylov_size; m_nr_eigenvalues = nEigVals; // We always compute an extra eigenvalue to make sure we always have m_nr_eigenvalues // So even if the double shifted QR consume the m_n_eigenvalues^th eigenvalue we are fine m_n_eigenvalues = m_nr_eigenvalues+1; // General parameter check if(m_krylov_size >= static_cast<int>(m_A.get_num_vertices())) FatalError("ARNOLDI: The krylov subspace size is larger than the matrix", NVGRAPH_ERR_BAD_PARAMETERS); if(m_n_eigenvalues >= m_krylov_size) FatalError("ARNOLDI: The number of required eigenvalues +1 is larger than the maximum krylov subspace size", NVGRAPH_ERR_BAD_PARAMETERS); if(m_krylov_size < 3) FatalError("ARNOLDI: Sould perform at least 3 iterations before restart", NVGRAPH_ERR_BAD_PARAMETERS); // Some checks on optional Markov parameters if (m_markov) { if (m_nr_eigenvalues != 1) FatalError("ARNOLDI: Only one eigenpair is needed for the equilibrium of a Markov chain", NVGRAPH_ERR_BAD_PARAMETERS); if (m_damping > 0.99999 || m_damping < 0.0001) FatalError("ARNOLDI: Wrong damping factor value", NVGRAPH_ERR_BAD_PARAMETERS); } //if (m_laplacian) //{ // if (m_parts > m_n_eigenvalues) // FatalError("IRAM: ", NVGRAPH_ERR_BAD_PARAMETERS); //} // Some checks on optional miramns parameters if ( m_nested_subspaces_freq <= 0) { m_nested_subspaces = 0; m_miramns=false; } else { m_safety_lower_bound = 7; if( m_nested_subspaces_freq > (m_krylov_size-(m_safety_lower_bound+m_nr_eigenvalues+1))) // ie not enough space betwen the number of ev and the max size of the subspace { #ifdef DEBUG COUT()<<"MIRAMns Warning: Invalid frequence of nested subspaces, nested_subspaces_freq > m_max-4*n_eigVal" << std::endl; #endif m_miramns=false; } else { m_miramns=true; // This formula should give the number of subspaces // We allways count the smallest, the largest plus every size matching m_nested_subspaces_freq between them. 
m_nested_subspaces = 2 + (m_krylov_size-(m_safety_lower_bound+m_nr_eigenvalues+1)-1)/m_nested_subspaces_freq; //COUT()<<"Number of nested subspaces : "<<m_nested_subspaces << std::endl; //COUT()<<"nested_subspaces_freq "<< m_nested_subspaces_freq << std::endl; } } m_residual = 1.0E6; //Allocations size_t n = m_A.get_num_vertices(); // nnz is not used // size_t nnz = m_A.get_num_edges(); // Device m_V.allocate(n*(m_krylov_size + 1)); m_V_tmp.allocate(n*(m_n_eigenvalues + 1)); m_ritz_eigenvectors_d.allocate(m_krylov_size*m_krylov_size); m_Q_d.allocate(m_krylov_size*m_krylov_size); //Host m_Vi.resize(m_krylov_size + 1); m_ritz_eigenvalues.resize(m_krylov_size); m_ritz_eigenvalues_i.resize(m_krylov_size); m_ritz_eigenvectors.resize(m_krylov_size * m_krylov_size); m_H.resize(m_krylov_size * m_krylov_size); m_H_select.resize(m_select*m_select); m_H_tmp.resize(m_krylov_size * m_krylov_size); m_Q.resize(m_krylov_size * m_krylov_size); if(m_miramns) { m_mns_residuals.resize(m_nested_subspaces); m_mns_beta.resize(m_nested_subspaces); } for (int i = 0; i < static_cast<int>(m_Vi.size()); ++i) { m_Vi[i]=m_V.raw()+i*n; } if (!has_init_guess) { const ValueType_ one = 1; const ValueType_ zero = 0; curandGenerator_t randGen; // Initialize random number generator CHECK_CURAND(curandCreateGenerator(&randGen,CURAND_RNG_PSEUDO_PHILOX4_32_10)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(randGen, 123456/*time(NULL)*/)); // Initialize initial vector CHECK_CURAND(curandGenerateNormalX(randGen, m_V.raw(), n, zero, one)); ValueType_ normQ1 = Cublas::nrm2(n, m_V.raw(), 1); Cublas::scal(n, (ValueType_)1.0/normQ1, m_V.raw(), 1); } else { m_V.copy(initial_guess); } //dump_raw_vec (m_V.raw(), 10, 0); if(m_markov) { update_dangling_nodes(n, m_a.raw(), static_cast<ValueType_>( m_damping)); //dump(m_a.raw(), 100, 0); m_b.allocate(n); ValueType_ val = static_cast<float>(1.0/n); // m_b.fill(val); //m_b.dump(0,n); } if (m_laplacian) { // degree matrix m_D.allocate(n); m_b.allocate(n); ValueType_ val = 1.0; m_b.fill(val); size_t n = m_A.get_num_vertices(); size_t nnz = m_A.get_num_edges(); ValueType_ alpha = 1.0, beta =0.0, gamma= -1.0; #if __cplusplus > 199711L Semiring sring = Semiring::PlusTimes; #else Semiring sring = PlusTimes; #endif csrmv_mp<IndexType_, ValueType_>(n, n, nnz, alpha, m_A, m_b.raw(), beta, m_D.raw(), sring); //Cusparse::csrmv(false, false, // n, n, nnz, // &alpha, // m_A.get_raw_values(), // m_A.get_raw_row_offsets(), // m_A.get_raw_column_indices(), // m_b.raw(), // &beta, // m_D.raw()); Cublas::scal(nnz, gamma, m_A.get_raw_values(), 1); // m_b can be deleted now //dump_raw_vec ( m_A.get_raw_values(), nnz, 0); //dump_raw_vec (m_D.raw(), n, 0); } // normalize Cublas::scal(n, (ValueType_)1.0/Cublas::nrm2(n, m_Vi[0], 1) , m_Vi[0], 1); m_iterations = 0; // arnoldi from 0 to k solve_arnoldi(0,m_krylov_size); } #ifdef DEBUG template <typename ValueType_> void dump_host_dense_mat(std::vector<ValueType_>& v, int ld) { std::stringstream ss; ss.str(std::string()); ss << std::setw(10); ss.precision(3); for (int i = 0; i < ld; ++i) { for (int j = 0; j < ld; ++j) { ss << v[i*ld+j] << std::setw(10); } ss << std::endl; } COUT()<<ss.str(); } template <typename ValueType_> void dump_host_vec(std::vector<ValueType_>& v) { std::stringstream ss; ss.str(std::string()); ss << std::setw(10); ss.precision(4); for (int i = 0; i < v.size(); ++i) ss << v[i] << std::setw(10); ss << std::endl; COUT()<<ss.str(); } #endif template <typename IndexType_, typename ValueType_> bool ImplicitArnoldi<IndexType_, 
ValueType_>::solve_arnoldi(int lower_bound, int upper_bound) { int inc =1, mns_residuals_idx = 0; size_t n = m_A.get_num_vertices(); size_t nnz = m_A.get_num_edges(); ValueType_ alpha = 1.0, beta =0.0, Hji = 0, dot_res; #if __cplusplus > 199711L Semiring sring = Semiring::PlusTimes; #else Semiring sring = PlusTimes; #endif //m_V.dump(lower_bound*n,n); if (m_miramns) { std::fill (m_mns_residuals.begin(),m_mns_residuals.end(),0.0); } for (int i = lower_bound; i < upper_bound; ++i) { // beta = norm(f); v = f/beta; if (i>0 && i == lower_bound) { m_beta = Cublas::nrm2(n, m_Vi[i], 1); // Vi = Vi/||Vi|| Cublas::scal(n, (ValueType_)1.0/m_beta, m_Vi[i], inc); // m_V.dump((i-1)*n,n); } // Compute H, V and f csrmv_mp<IndexType_, ValueType_>(n, n, nnz, alpha, m_A, m_Vi[i], beta, m_Vi[i+1], sring); //if (i == 0) dump_raw_vec (m_Vi[i+1], n, 0); if (m_laplacian) { //apply to the external diagonal dmv(n, alpha, m_D.raw(), m_Vi[i], alpha, m_Vi[i+1]); //dump_raw_vec ( m_D.raw(), 10, 0); //dump_raw_vec (m_Vi[i+1], 10, 0); } if(m_markov) { Cublas::scal(n, m_damping, m_Vi[i+1], inc); Cublas::dot(n, m_a.raw(), inc, m_Vi[i], inc, &dot_res); Cublas::axpy(n, dot_res, m_b.raw(), inc, m_Vi[i+1], inc); } // Modified GS algorithm for (int j = 0; j <= i; ++j) { // H(j,i) = AVi.Vj Cublas::dot(n, m_Vi[i+1], inc, m_Vi[j], inc, &Hji); m_H[i*m_krylov_size + j] = Hji; //V(i + 1) -= H(j, i) * V(j) Cublas::axpy(n, -Hji, m_Vi[j],inc, m_Vi[i+1],inc); } if (i > 0) { // H(i+1,i) = ||Vi|| <=> H(i,i-1) = ||Vi|| m_H[(i-1)*m_krylov_size + i] = m_beta; } //||Vi+1|| m_beta = Cublas::nrm2(n, m_Vi[i+1], 1); if (i+1 < upper_bound) { Cublas::scal(n, (ValueType_)1.0/m_beta, m_Vi[i+1], inc); } if (m_miramns) { // The smallest subspaces is always m_safety_lower_bound+m_nr_eigenvalues+1 // The largest is allways max_krylov_size, // Between that we check the quality at every stride (m_nested_subspaces_freq). 
if( i == m_safety_lower_bound+m_nr_eigenvalues || i+1 == upper_bound || (i > m_safety_lower_bound+m_nr_eigenvalues && ((i-(m_safety_lower_bound+m_nr_eigenvalues))%m_nested_subspaces_freq == 0)) ) { //COUT()<<"i "<<i<<", idx "<<mns_residuals_idx << std::endl; //dump_host_dense_mat(m_H, m_krylov_size); compute_residual(i+1,true); // it is i+1 just because at an iteration i the subspace size is i+1 //m_mns_residuals[m_krylov_size-m_n_eigenvalues-(m_krylov_size-i)] = m_residual; m_mns_beta[mns_residuals_idx] = m_beta; //store current residual m_mns_residuals[mns_residuals_idx] = m_residual; mns_residuals_idx++; // early exit if converged if (m_residual<m_tolerance) { // prepare for exit here //m_select = m_krylov_size-m_n_eigenvalues-(m_krylov_size-i)+1; m_select = i+1; if (m_laplacian) { SR(m_select); } else if (m_markov) { LR(m_select); } else { LM(m_select); } return true; } } } } #ifdef IRAM_DEBUG COUT() <<"---------------------------------------------"<<std::endl <<" ARNOLDI "<<std::endl <<"---------------------------------------------"<<std::endl; COUT()<<"V:"<<std::endl; for (int i = 0; i < m_Vi.size()-1; ++i) m_V.dump(n*i,n); COUT()<<std::endl<<"f:"<<std::endl; m_V.dump(n*m_krylov_size,n); COUT()<<std::endl<<"H:"<<std::endl; dump_host_dense_mat(m_H, m_krylov_size); #endif // dump_host_dense_mat(m_H, m_krylov_size); // this is where we compute the residual after the arnoldi reduction in IRAM if (!m_miramns) compute_residual(m_krylov_size, true); return m_converged; // maybe we can optimize that later } template <typename IndexType_, typename ValueType_> bool ImplicitArnoldi<IndexType_, ValueType_>::solve_it() { if (m_residual<m_tolerance) return true; // no need to do the k...p arnoldi steps if (m_miramns) { int prev = m_select; select_subspace(); extract_subspace(prev); } implicit_restart(); return solve_arnoldi(m_n_eigenvalues, m_krylov_size); // arnoldi from k to m } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::select_subspace() { #ifdef IRAM_DEBUG COUT() <<std::endl << "Residuals "; dump_host_vec(m_mns_residuals); #endif #if __cplusplus > 199711L typename std::vector<ValueType_>::iterator it = std::min_element(std::begin(m_mns_residuals), std::end(m_mns_residuals)); #else typename std::vector<ValueType_>::iterator it = std::min_element(m_mns_residuals.begin(), m_mns_residuals.end()); #endif m_residual = *it; #if __cplusplus > 199711L int dist = static_cast<int>(std::distance(std::begin(m_mns_residuals), it)); #else int dist = static_cast<int>(std::distance(m_mns_residuals.begin(), it)); #endif m_select = std::min((m_safety_lower_bound+m_nr_eigenvalues) + (m_nested_subspaces_freq*dist) +1, m_krylov_size); m_select_idx = dist ; //COUT()<<"m_select "<<m_select<< std::endl; } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::extract_subspace(int m) { if (m != m_select || m_H_select.size() == 0) { m_H_select.resize(m_select*m_select); m_H_tmp.resize(m_select*m_select); m_Q.resize(m_select*m_select); m_Q_tmp.resize(m_select*m_select); } //m_ritz_eigenvalues.resize(m_select);; //host //m_ritz_eigenvectors.resize(m_select*m_select); // copy //int k = m_krylov_size-m_select; //int l = 0; //for(int i = k; i<m_krylov_size; i++) //{ // for(int j = 0; j<m_select; j++) // { // m_H_select[l*m_select+j] = m_H[i*m_krylov_size+j]; // } // l++; //} for(int i = 0; i<m_select; i++) { for(int j = 0; j<m_select; j++) { m_H_select[i*m_select+j] = m_H[i*m_krylov_size+j]; } } // retrieve || f || if needed if 
(m_select < m_krylov_size) m_beta = m_mns_beta[m_select_idx]; m_dirty_bit = true; } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::compute_residual(int subspace_size, bool dirty_bit) { //dump_host_dense_mat(m_H_select, m_select); if (m_miramns) { if (dirty_bit) { if (static_cast<int>(m_H_tmp.size()) != subspace_size*subspace_size) m_H_tmp.resize(subspace_size*subspace_size); //std::fill (m_ritz_eigenvalues.begin(),m_ritz_eigenvalues.end(),0.0); //std::fill (m_ritz_eigenvectors.begin(),m_ritz_eigenvectors.end(),0.0); for(int i = 0; i<subspace_size; i++) { for(int j = 0; j<subspace_size; j++) { m_H_tmp[i*subspace_size+j] = m_H[i*m_krylov_size+j]; } } // dump_host_dense_mat(m_H_tmp,subspace_size); //Lapack<ValueType_>::geev(&m_H_tmp[0], &m_ritz_eigenvalues[0], &m_ritz_eigenvectors[0], subspace_size , subspace_size, subspace_size); Lapack<ValueType_>::geev(&m_H_tmp[0], &m_ritz_eigenvalues[0], &m_ritz_eigenvalues_i[0], &m_ritz_eigenvectors[0], NULL, subspace_size , subspace_size, subspace_size); } } else { if (dirty_bit) { // we change m_H_tmp size during miramns if (m_H_tmp.size() != m_H.size()) m_H_tmp.resize(m_H.size()); std::copy(m_H.begin(), m_H.end(), m_H_tmp.begin()); //Lapack<ValueType_>::geev(&m_H_tmp[0], &m_ritz_eigenvalues[0], &m_ritz_eigenvectors[0], m_krylov_size , m_krylov_size, m_krylov_size); Lapack<ValueType_>::geev(&m_H_tmp[0], &m_ritz_eigenvalues[0], &m_ritz_eigenvalues_i[0], &m_ritz_eigenvectors[0], NULL, m_krylov_size , m_krylov_size, m_krylov_size); } } //COUT() << "m_ritz_eigenvalues : "<<std::endl; //dump_host_vec(m_ritz_eigenvalues); //COUT() << "m_ritz_eigenvectors : "<<std::endl; //dump_host_dense_mat(m_ritz_eigenvectors, subspace_size); // sort if (m_laplacian) { SR(subspace_size); } else if (m_markov) { LR(m_select); } else { LM(subspace_size); } //COUT() << "m_ritz_eigenvalues : "<<std::endl; // dump_host_vec(m_ritz_eigenvalues); ValueType_ last_ritz_vector, residual_norm, tmp_residual; ValueType_ lam; m_residual = 0.0f; // Convergence check by approximating the residual of the Ritz pairs. 
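// Informal sketch of the estimate computed below (notation taken from the code, no extra work is done here):
// for a Ritz pair (lambda_i, y_i) of the subspace_size x subspace_size Hessenberg matrix, the Arnoldi relation
//   A*V_m = V_m*H_m + f*e_m'
// gives, with x_i = V_m*y_i,
//   || A*x_i - lambda_i*x_i || = ||f|| * |e_m' * y_i| = m_beta * |last_ritz_vector|,
// so the loop below forms tmp_residual = |m_beta * y_i[subspace_size-1]| / |lambda_i|
// (modulus of lambda_i when its imaginary part is nonzero) and keeps the largest value over the
// wanted pairs as m_residual. In the Markov branch only the principal pair is checked and the
// estimate is not scaled, since its eigenvalue is 1.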
if (m_markov) { last_ritz_vector = m_ritz_eigenvectors[subspace_size-1]; //COUT() << "last_ritz_vector : "<<last_ritz_vector<<std::endl; // if (!last_ritz_vector) // dump_host_dense_mat(m_ritz_eigenvectors, subspace_size); // COUT() << "m_beta : "<<m_beta<<std::endl; m_residual = std::abs(last_ritz_vector * m_beta); if (m_residual == 0.0) m_residual = 1.0E6; } else { for (int i = 0; i < m_n_eigenvalues; i++) { last_ritz_vector = m_ritz_eigenvectors[i * subspace_size + subspace_size-1]; residual_norm = std::abs(last_ritz_vector * m_beta); if(m_ritz_eigenvalues_i[i]) lam = std::sqrt(m_ritz_eigenvalues[i]*m_ritz_eigenvalues[i] + m_ritz_eigenvalues_i[i]*m_ritz_eigenvalues_i[i]); else lam = std::abs(m_ritz_eigenvalues[i]); tmp_residual = residual_norm / lam; //tmp_residual = residual_norm ; //COUT() << "last_ritz_vector : "<<last_ritz_vector<<std::endl; //COUT() << "res : "<<residual_norm<<std::endl; //COUT() << "ri : "<<m_ritz_eigenvalues[i]<<std::endl; //COUT() << "tmp : "<<tmp_residual<<std::endl; if (m_residual<tmp_residual) m_residual = tmp_residual; } } //#ifdef IRAM_DEBUG //COUT()<<std::endl << "Residual " << m_residual <<std::endl; //COUT() << "m_ritz_eigenvalues : "<<std::endl; //dump_host_vec(m_ritz_eigenvalues); //COUT() << "m_ritz_eigenvectors : "<<std::endl; //dump_host_dense_mat(m_ritz_eigenvectors, subspace_size); //COUT() << "m_beta : " << m_beta <<std::endl; //COUT() << "last_ritz_vector : " << last_ritz_vector <<std::endl; //COUT() << "residual_norm : " << residual_norm <<std::endl; //#endif if (m_residual < m_tolerance) { m_converged = true; } else { m_converged = false; } } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::implicit_restart() { // optim: avoid the cpy here if (!m_miramns) std::copy(m_H.begin(), m_H.end(), m_H_select.begin()); select_shifts(m_dirty_bit); #ifdef IRAM_DEBUG for(int i = 0; i<m_n_eigenvalues; i++) { COUT() << m_ritz_eigenvalues[i]; if (m_ritz_eigenvalues_i[i]) COUT() << " " <<m_ritz_eigenvalues_i[i]<<std::endl; else COUT() <<std::endl; } COUT()<<std::endl <<"---------------------------------------------"<<std::endl <<" KRYLOV SOLUTION "<<std::endl <<"---------------------------------------------"<<std::endl; COUT() << "ritz_values : "<<std::endl; dump_host_vec(m_ritz_eigenvalues); COUT() << "ritz_vectors : "<<std::endl; dump_host_dense_mat(m_ritz_eigenvectors, m_select); #endif qr_step(); #ifdef IRAM_DEBUG COUT()<<std::endl <<"---------------------------------------------"<<std::endl <<" SHIFTED QR "<<std::endl <<"---------------------------------------------"<<std::endl; COUT() << "H+"<< std::endl; dump_host_dense_mat(m_H_select, m_select); COUT() << "Q+"<< std::endl; dump_host_dense_mat(m_Q, m_select); #endif refine_basis(); #ifdef IRAM_DEBUG COUT()<<std::endl <<"---------------------------------------------"<<std::endl <<" REFINED BASIS "<<std::endl <<"---------------------------------------------"<<std::endl; int n = m_A.get_num_vertices(); COUT() << "V+ : "<<std::endl; for (int i = 0; i < m_n_eigenvalues; ++i) m_V.dump(n*i,n); COUT()<<std::endl<<"f+:"<<std::endl; m_V.dump(n*m_n_eigenvalues,n); #endif // optim: avoid the cpy here if (!m_miramns) std::copy(m_H_select.begin(), m_H_select.end(), m_H.begin()); } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::select_shifts(bool dirty_bit) { // dirty_bit is false by default if (dirty_bit) { std::copy(m_H_select.begin(), m_H_select.end(), m_H_tmp.begin()); //Lapack<ValueType_>::geev(&m_H_tmp[0], 
&m_ritz_eigenvalues[0], &m_ritz_eigenvectors[0], m_select , m_select, m_select); Lapack<ValueType_>::geev(&m_H_tmp[0], &m_ritz_eigenvalues[0],&m_ritz_eigenvalues_i[0], &m_ritz_eigenvectors[0], NULL, m_select , m_select, m_select); // #ifdef IRAM_DEBUG // COUT() << "m_ritz_eigenvalues : "<<std::endl; // dump_host_vec(m_ritz_eigenvalues); // COUT() << "m_ritz_eigenvectors : "<<std::endl; // dump_host_dense_mat(m_ritz_eigenvectors, m_select); // #endif } m_dirty_bit = false; if (m_laplacian) { SR(m_select); } else if (m_markov) { LR(m_select); } else { LM(m_select); } // in the future we can quikly add LM, SM, SR // complex (LI SI) are not supported. } #if __cplusplus <= 199711L template<typename ValueType_> bool cmp_LR(const std::pair<int,ValueType_> &left, const std::pair<int,ValueType_> &right){ return left.second > right.second; }; #endif template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::LR(int subspace_sz) { // Eigen values of interest have the largest real part std::vector<std::pair<int,ValueType_> > items; for (int i = 0; i < subspace_sz; ++i) items.push_back(std::make_pair( i, m_ritz_eigenvalues[i])); // this is a reverse key value sort by algebraic value // in this case we select the largest eigenvalues // In the future we can add other shift selection strategies here // to converge to different eigen values (reverse sort by magnitude, or usual sort by magnitude etc ). #if __cplusplus > 199711L std::sort(items.begin(), items.end(),[](const std::pair<int,ValueType_> &left, const std::pair<int,ValueType_> &right) {return left.second > right.second; }); #else std::sort(items.begin(), items.end(), cmp_LR<ValueType_>); #endif // Now we need to reorder the vectors accordingly std::vector<ValueType_> ritz_tmp(m_ritz_eigenvectors); for (int i = 0; i < subspace_sz; ++i) { //COUT() << "reordrering : " << items[i].first <<std::endl // << "start : " <<items[i].first*subspace_sz<<std::endl // << "end : " <<items[i].first*subspace_sz+subspace_sz<<std::endl // << "out : " <<i*subspace_sz<<std::endl; std::copy(ritz_tmp.begin() + (items[i].first*subspace_sz), ritz_tmp.begin() + (items[i].first*subspace_sz + subspace_sz), m_ritz_eigenvectors.begin()+(i*subspace_sz)); m_ritz_eigenvalues[i] = items[i].second; } // dump_host_vec(m_ritz_eigenvalues); std::vector<ValueType_> tmp_i(m_ritz_eigenvalues_i); for (int i = 0; i < subspace_sz; ++i) { m_ritz_eigenvalues_i[i] = tmp_i[items[i].first]; } } template<typename ValueType_> bool cmp_LM(const std::pair<int,ValueType_> &left, const std::pair<int,ValueType_> &right){ return left.second > right.second; }; template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::LM(int subspace_sz) { std::vector<ValueType_> magnitude(subspace_sz); std::vector<std::pair<int, ValueType_ > > kv; for (int i = 0; i < subspace_sz; ++i) magnitude[i] = m_ritz_eigenvalues[i]*m_ritz_eigenvalues[i] + m_ritz_eigenvalues_i[i]*m_ritz_eigenvalues_i[i]; for (int i = 0; i < subspace_sz; ++i) kv.push_back(std::make_pair( i, magnitude[i])); // this is a reverse key value sort by magnitude // in this case we select the largest magnitude std::sort(kv.begin(), kv.end(), cmp_LM<ValueType_>); // Now we need to reorder the vectors accordingly std::vector<ValueType_> ritz_tmp(m_ritz_eigenvectors); std::vector<ValueType_> ev(m_ritz_eigenvalues); std::vector<ValueType_> ev_i(m_ritz_eigenvalues_i); for (int i = 0; i < subspace_sz; ++i) { //COUT() << "reordrering : " << kv[i].first <<std::endl // << "start : " 
<<kv[i].first*subspace_sz<<std::endl // << "end : " <<kv[i].first*subspace_sz+subspace_sz<<std::endl // << "out : " <<i*subspace_sz<<std::endl; std::copy(ritz_tmp.begin() + (kv[i].first*subspace_sz), ritz_tmp.begin() + (kv[i].first*subspace_sz + subspace_sz), m_ritz_eigenvectors.begin()+(i*subspace_sz)); m_ritz_eigenvalues[i] = ev[kv[i].first]; m_ritz_eigenvalues_i[i] = ev_i[kv[i].first]; } } #if __cplusplus <= 199711L template<typename ValueType_> bool cmp_SR(const std::pair<int,ValueType_> &left, const std::pair<int,ValueType_> &right){ return left.second < right.second; }; #endif template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::SR(int subspace_sz) { // Eigen values of interest have the largest real part std::vector<std::pair<int,ValueType_> > items; for (int i = 0; i < subspace_sz; ++i) items.push_back(std::make_pair( i, m_ritz_eigenvalues[i])); // this is a reverse key value sort by algebraic value // in this case we select the largest eigenvalues // In the future we can add other shift selection strategies here // to converge to different eigen values (reverse sort by magnitude, or usual sort by magnitude etc ). #if __cplusplus > 199711L std::sort(items.begin(), items.end(),[](const std::pair<int,ValueType_> &left, const std::pair<int,ValueType_> &right) {return left.second < right.second; }); #else std::sort(items.begin(), items.end(), cmp_SR<ValueType_>); #endif // Now we need to reorder the vectors accordingly std::vector<ValueType_> ritz_tmp(m_ritz_eigenvectors); for (int i = 0; i < subspace_sz; ++i) { //COUT() << "reordrering : " << items[i].first <<std::endl // << "start : " <<items[i].first*subspace_sz<<std::endl // << "end : " <<items[i].first*subspace_sz+subspace_sz<<std::endl // << "out : " <<i*subspace_sz<<std::endl; std::copy(ritz_tmp.begin() + (items[i].first*subspace_sz), ritz_tmp.begin() + (items[i].first*subspace_sz + subspace_sz), m_ritz_eigenvectors.begin()+(i*subspace_sz)); m_ritz_eigenvalues[i] = items[i].second; } // dump_host_vec(m_ritz_eigenvalues); } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::qr_step() { ValueType_ mu, mu_i, mu_i_sq; int n = m_select; int ld = m_select; std::vector<ValueType> tau(n); std::vector<ValueType> work(n); int lwork = -1; // workspace query std::copy (m_H_select.begin(),m_H_select.end(), m_H_tmp.begin()); Lapack<ValueType_>::geqrf(n, n, &m_H_tmp[0], ld, &tau[0], &work[0], &lwork); // work is a real array used as workspace. On exit, if LWORK = -1, work[0] contains the optimal LWORK. // it can be safely casted to int here to remove the conversion warning. 
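// Illustrative sketch of the LAPACK workspace-query idiom used here (A/tau/work/lw are placeholders,
// not new members): a call with lwork == -1 performs no factorization and only reports the optimal
// workspace size in work[0], e.g.
//   int lw = -1;
//   Lapack<ValueType_>::geqrf(n, n, A, ld, tau, work, &lw);   // query only
//   lw = static_cast<int>(work[0]); /* resize work to lw */   // allocate
//   Lapack<ValueType_>::geqrf(n, n, A, ld, tau, work, &lw);   // actual factorization
// The shifted-QR loop below reuses the same tau/work/lwork buffers for every shift.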
lwork = static_cast<int>(work[0]); work.resize(lwork); // Q0 = I m_Q.assign(m_Q.size(),0.0); shift(m_Q, m_select, m_select, -1); //for (int j = 0; j < m_select; j++) // m_Q[j*m_select+j] = 1.0; #ifdef IRAM_DEBUG COUT() << "m_ritz_eigenvalues : "<<std::endl; dump_host_vec(m_ritz_eigenvalues); COUT() << "H0 : "<<std::endl; dump_host_dense_mat(m_H_select, m_select); COUT() << "Q0 : "<<std::endl; dump_host_dense_mat(m_Q, m_select); COUT() << "Lwork : " << lwork <<std::endl; #endif int i = m_select-1; while (i >= m_n_eigenvalues) { //Get the shift mu_i = m_ritz_eigenvalues_i[i]; mu = m_ritz_eigenvalues[i]; shift(m_H_tmp, m_select, m_select, mu); if (mu_i ) { //Complex case //Double shift //(H - re_mu*I)^2 + im_mu^2*I) if (i==m_n_eigenvalues) { // if we are in this case we will consume the next eigenvalue, which is a wanted eigenvalue // fortunately m_n_eigenvalues = m_nr_eigenvalues +1 (we always compute one more eigenvalue) m_n_eigenvalues -=1; //COUT() << "IRAM: last ev absorbed in double shift" <<std::endl; } //COUT() << "Complex shift"<<std::endl; //COUT() << "shift : " << mu << " " << mu_i << "i" <<std::endl; std::vector<ValueType> A(m_select*m_select); for (int ii = 0; ii < m_select; ii++) for (int k = 0; k < m_select; k++) for (int j = 0; j < m_select; j++) A[ii*m_select+j] += m_H_tmp[ii*m_select+k]* m_H_tmp[k*m_select+j]; mu_i_sq = mu_i*mu_i; std::copy (A.begin(),A.end(), m_H_tmp.begin()); shift(m_H_tmp, m_select, m_select, -mu_i_sq); //COUT() << "H"<< m_select-i<<std::endl; //dump_host_dense_mat(m_H_tmp, m_select); } // [Q,R] = qr(H - mu*I); Lapack<ValueType_>::geqrf(n, n, &m_H_tmp[0], ld, &tau[0], &work[0], &lwork); //H+ = (Q)'* H * Q ; Lapack<ValueType_>::ormqr(false, true, n, n, n, &m_H_tmp[0], ld, &tau[0], &m_H_select[0], n, &work[0], &lwork); Lapack<ValueType_>::ormqr(true, false, n, n, n, &m_H_tmp[0], ld, &tau[0], &m_H_select[0], n, &work[0], &lwork); //Q+ = Q+*Q; Lapack<ValueType_>::ormqr(true, false, n, n, n, &m_H_tmp[0], ld, &tau[0], &m_Q[0], n, &work[0], &lwork); // clean up below subdiagonal (column major storage) cleanup_subspace(m_H_select, m_select,m_select); //for (int j = 0; j < m_select-1; j++) // for (int k = j+2; k < m_select; k++) // m_H_select[j*m_select + k] = 0; //COUT() << "shift : " << mu <<std::endl; //COUT() << "H"<< m_select-i<<std::endl; //dump_host_dense_mat(m_H_select, m_select); //COUT() << "Q"<< m_select-i <<std::endl; //dump_host_dense_mat(m_Q, m_select); std::copy (m_H_select.begin(),m_H_select.end(), m_H_tmp.begin()); // Example for how to explicitly form Q // Lapack<ValueType_>::orgqr(n, n, n, &m_H_tmp[0], ld, &tau[0], &work[0], &lwork); // std::copy (m_H_tmp.begin(),m_H_tmp.end(), m_Q.begin()); if (mu_i) i-=2; //complex else i-=1; //real } } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::refine_basis() { ValueType_ alpha, beta; // update f (and send on dev at some point) // Back to row major -> transpose Q and mind which element we pick in H (ie stored as Ht). 
// copy Q to dev // Need Mat1*Mat2, where Mat1(n,m) is tall, skin, dense and Mat2(m,l) is small dense with l<m and m<<n // something like f+1 = V(:,1:m)*Q(:,n_ev+1)*H(n_ev+1,n_ev) + f*Q(m,n_ev); // ie vec = Lmat Svec scal +Svec scal , all dense (L=large S=small) // just local small name for variables int n = m_A.get_num_vertices(), nev = m_n_eigenvalues, nk = m_select; m_Q_d.fill(0); ValueType_ *fptr = m_V_tmp.raw()+n*nev; // = Vi[nev] cudaMemcpyAsync(m_Q_d.raw(), &m_Q[0], (size_t)(m_select*m_select*sizeof(m_Q[0])), cudaMemcpyHostToDevice); cudaCheckError(); cudaMemcpyAsync(fptr, m_Vi[nk], (size_t)(n*sizeof(ValueType_)), cudaMemcpyDeviceToDevice); cudaCheckError(); alpha = m_Q[(nev-1) * nk + nk - 1]; beta = 1.0; // retrieve f from v[m_select] if needed // We could also store the vector f for each nested subspace if (m_select!=m_krylov_size) Cublas::scal(n, m_beta, fptr, 1); Cublas::scal(n, alpha, fptr, 1); alpha = m_H_select[(nev-1) * nk + nev ]; Cublas::gemm(false, false, n, 1, nk, &alpha, m_V.raw(), n, m_Q_d.raw(), nk, &beta, fptr, n); //COUT() << "f+ : "<<std::endl; //m_V_tmp.dump(2*n,n); //COUT() <<std::endl; //V(:,1:m)*Q(:,n_ev+1)*H(n_ev+1,n_ev) // ie Lmat = Lmat * Smat, all dense (L=large S=small) // <=> tmpT = H(n_ev, n_ev+1) V*Q in col maj alpha = 1.0; beta = 0.0; // debug cleaning //m_Q_d.fill(0); //cudaMemcpyAsync(m_Q_d.raw(), &m_Q[0], (size_t)(nev*m_select*sizeof(m_Q[0])), cudaMemcpyHostToDevice); //fill_raw_vec (m_V_tmp.raw(), n*(nev+1), beta); //fill_raw_vec (m_V.raw()+n*nk, n, beta); //COUT() << "QT : "<<std::endl; //m_Q_d.dump(0,m_select); //m_Q_d.dump(1*m_select, m_select); //m_Q_d.dump(2*m_select, m_select); //m_Q_d.dump(3*m_select, m_select); //COUT() <<std::endl; //COUT() << "VT : "<<std::endl; //m_V.dump(0,n); //m_V.dump(1*n,n); //m_V.dump(2*n,n); //m_V.dump(3*n,n); ////m_V.dump(4*n,n); //COUT() <<std::endl; //cudaDeviceSynchronize(); Cublas::gemm(false, false, n, nev, nk, &alpha, m_V.raw(), n, m_Q_d.raw(), nk, &beta, m_V_tmp.raw(), n); m_V.copy(m_V_tmp); // update H if (m_miramns) { for(int i = 0; i<m_select; i++) for(int j = 0; j<m_select; j++) m_H[i*m_krylov_size+j] = m_H_select[i*m_select+j]; cleanup_subspace(m_H, m_krylov_size,m_n_eigenvalues); } } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::compute_eigenvectors() { //dump_host_vec(m_ritz_eigenvalues); //dump_host_dense_mat(m_ritz_eigenvectors,m_select); int n = m_A.get_num_vertices(), nev = m_nr_eigenvalues, nk = m_select; ValueType_ alpha=1.0, beta = 0.0; cudaMemcpyAsync(m_ritz_eigenvectors_d.raw(), &m_ritz_eigenvectors[0], (size_t)(m_select*m_select*sizeof(m_ritz_eigenvectors[0])), cudaMemcpyHostToDevice); cudaCheckError(); Cublas::gemm(false, false, n, nev, nk, &alpha, m_V.raw(), n, m_ritz_eigenvectors_d.raw(), nk, &beta, m_eigenvectors.raw(), n); //nrm 1 for pagerank if(m_markov) Cublas::scal(n, (ValueType_)1.0/m_eigenvectors.nrm1(), m_eigenvectors.raw(), 1); #ifdef IRAM_DEBUG COUT()<<std::endl <<"---------------------------------------------"<<std::endl <<" EIGENVECTORS "<<std::endl <<"---------------------------------------------"<<std::endl; for (int i = 0; i < m_nr_eigenvalues; ++i) m_eigenvectors.dump(n*i,n); COUT() <<std::endl; #endif } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::cleanup_subspace(std::vector<ValueType_>& v, int ld, int new_sz) { // just a simple clean // In Out // * * 0 0 0 * * 0 0 0 // * * * 0 0 * * * 0 0 // * * * * 0 * * * * 0 // * * * * * * * * * 0 <--- new_sz // * * * * * 0 0 0 
0 0 for (int i = 0; i < new_sz-1; i++) for (int j = i+2; j < new_sz; j++) v[i*ld + j] = 0; for (int i = new_sz; i < ld; i++) for (int j = 0; j < ld; j++) v[i*ld + j] = 0; for (int i = 0; i < new_sz; i++) for (int j = new_sz; j < ld; j++) v[i*ld + j] = 0; // Not used anymore // In Out // * * 0 0 0 0 0 0 0 0 // * * * 0 0 0 0 0 0 0 // * * * * 0 * * 0 0 0 <--- new_sz // * * * * * * * * 0 0 // * * * * * * * * 0 0 //int k = ld-new_sz; //for (int i = 0; i < ld; ++i) // for (int j = 0; j < ld; ++j) // if ((i < k) || // (j >= new_sz) || // (i >= k && j-1 > i-k )) // v[i*ld+j] = 0.0; } template <typename IndexType_, typename ValueType_> void ImplicitArnoldi<IndexType_, ValueType_>::shift(std::vector<ValueType_>& H, int ld, int m, ValueType mu) { #ifdef IRAM_DEBUG dump_host_dense_mat(H,ld); #endif int start = ld-m; for (int i = start; i < ld; i++) H[i*ld+i-start] -= mu; #ifdef IRAM_DEBUG dump_host_dense_mat(H,ld); #endif } template <typename IndexType_, typename ValueType_> std::vector<ValueType_> ImplicitArnoldi<IndexType_, ValueType_>::get_f_copy() { std::vector<ValueType> tmp(m_A.get_num_vertices()); cudaMemcpyAsync(&tmp[0],m_Vi[m_krylov_size], (size_t)(m_A.get_num_vertices()*sizeof(ValueType_)), cudaMemcpyDeviceToHost); cudaCheckError(); return tmp; } template <typename IndexType_, typename ValueType_> std::vector<ValueType_> ImplicitArnoldi<IndexType_, ValueType_>::get_fp_copy() { std::vector<ValueType> tmp(m_A.get_num_vertices()); cudaMemcpyAsync(&tmp[0],m_Vi[m_n_eigenvalues], (size_t)(m_A.get_num_vertices()*sizeof(ValueType_)), cudaMemcpyDeviceToHost); cudaCheckError(); return tmp; } template <typename IndexType_, typename ValueType_> std::vector<ValueType_> ImplicitArnoldi<IndexType_, ValueType_>::get_V_copy() { std::vector<ValueType> tmp(m_A.get_num_vertices()*(m_krylov_size+1)); cudaMemcpyAsync(&tmp[0],m_V.raw(), (size_t)(m_A.get_num_vertices()*(m_krylov_size+1)*sizeof(ValueType_)), cudaMemcpyDeviceToHost); cudaCheckError(); return tmp; } template class ImplicitArnoldi<int, double>; template class ImplicitArnoldi<int, float>; } // end namespace nvgraph
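// Informal overview of how the pieces above fit together (reconstructed from the member functions
// in this file; the outer driver that repeatedly calls solve_it() is not restated here):
//
//   solve_arnoldi(k, m)        // build/extend the Arnoldi factorization A*V = V*H + f*e_m',
//                              // for MIRAM-ns also checking the residual of each nested subspace
//   while (!converged)
//     solve_it();              // (MIRAM-ns) select_subspace() + extract_subspace(), then
//                              // implicit_restart(): select_shifts -> qr_step -> refine_basis,
//                              // then solve_arnoldi(k, m) again on the compressed factorization
//   compute_eigenvectors();    // X = V * Y via cuBLAS gemm; 1-norm scaling in the Markov case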
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/nvgraph.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include <cstdlib> #include <climits> #include <cfloat> #include <vector> #include <nvlouvain.cuh> #include <jaccard_gpu.cuh> #include <cusolverDn.h> #include <nvgraph_error.hxx> #include <cnmem_shared_ptr.hxx> #include <valued_csr_graph.hxx> #include <multi_valued_csr_graph.hxx> #include <nvgraph_vector.hxx> #include <nvgraph_cusparse.hxx> #include <nvgraph_cublas.hxx> #include <nvgraph_csrmv.hxx> #include <pagerank.hxx> #include <arnoldi.hxx> #include <sssp.hxx> #include <widest_path.hxx> #include <partition.hxx> #include <nvgraph_convert.hxx> #include <size2_selector.hxx> #include <modularity_maximization.hxx> #include <bfs.hxx> #include <triangles_counting.hxx> #include <csrmv_cub.h> #include <nvgraph.h> // public header **This is NVGRAPH C API** #include <nvgraphP.h> // private header, contains structures, and potentially other things, used in the public C API that should never be exposed. #include <nvgraph_experimental.h> // experimental header, contains hidden API entries, can be shared only under special circumstances without reveling internal things #include "debug_macros.h" #include "2d_partitioning.h" #include "bfs2d.hxx" static inline int check_context(const nvgraphHandle_t h) { int ret = 0; if (h == NULL || !h->nvgraphIsInitialized) ret = 1; return ret; } static inline int check_graph(const nvgraphGraphDescr_t d) { int ret = 0; if (d == NULL || d->graphStatus == IS_EMPTY) ret = 1; return ret; } static inline int check_topology(const nvgraphGraphDescr_t d) { int ret = 0; if (d->graphStatus == IS_EMPTY) ret = 1; return ret; } static inline int check_int_size(size_t sz) { int ret = 0; if (sz >= INT_MAX) ret = 1; return ret; } static inline int check_int_ptr(const int* p) { int ret = 0; if (!p) ret = 1; return ret; } static inline int check_uniform_type_array(const cudaDataType_t * t, size_t sz) { int ret = 0; cudaDataType_t uniform_type = t[0]; for (size_t i = 1; i < sz; i++) { if (t[i] != uniform_type) ret = 1; } return ret; } template<typename T> bool check_ptr(const T* p) { bool ret = false; if (!p) ret = true; return ret; } namespace nvgraph { //TODO: make those template functions in a separate header to be included by both //graph_extractor.cu and nvgraph.cpp; //right now this header does not exist and including graph_concrete_visitors.hxx //doesn't compile because of the Thrust code; // extern CsrGraph<int>* extract_subgraph_by_vertices(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream); extern MultiValuedCsrGraph<int, float>* extract_subgraph_by_vertices(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream); extern MultiValuedCsrGraph<int, double>* extract_subgraph_by_vertices(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream); extern CsrGraph<int>* extract_subgraph_by_edges(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream); extern MultiValuedCsrGraph<int, float>* 
extract_subgraph_by_edges(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream); extern MultiValuedCsrGraph<int, double>* extract_subgraph_by_edges(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream); #ifndef NVGRAPH_LIGHT extern CsrGraph<int>* contract_graph_csr_mul(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern CsrGraph<int>* contract_graph_csr_sum(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern CsrGraph<int>* contract_graph_csr_min(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern CsrGraph<int>* contract_graph_csr_max(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern MultiValuedCsrGraph<int, float>* contract_graph_mv_float_mul(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern MultiValuedCsrGraph<int, float>* contract_graph_mv_float_sum(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern MultiValuedCsrGraph<int, float>* contract_graph_mv_float_min(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern MultiValuedCsrGraph<int, float>* contract_graph_mv_float_max(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern MultiValuedCsrGraph<int, double>* contract_graph_mv_double_mul(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern MultiValuedCsrGraph<int, double>* contract_graph_mv_double_sum(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern MultiValuedCsrGraph<int, double>* contract_graph_mv_double_min(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); extern MultiValuedCsrGraph<int, double>* contract_graph_mv_double_max(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce); #endif nvgraphStatus_t getCAPIStatusForError(NVGRAPH_ERROR err) { nvgraphStatus_t ret = NVGRAPH_STATUS_SUCCESS; switch (err) { case NVGRAPH_OK: ret = NVGRAPH_STATUS_SUCCESS; break; case NVGRAPH_ERR_BAD_PARAMETERS: ret = NVGRAPH_STATUS_INVALID_VALUE; break; case NVGRAPH_ERR_UNKNOWN: ret = NVGRAPH_STATUS_INTERNAL_ERROR; break; case NVGRAPH_ERR_CUDA_FAILURE: ret = NVGRAPH_STATUS_EXECUTION_FAILED; break; case NVGRAPH_ERR_THRUST_FAILURE: ret = NVGRAPH_STATUS_EXECUTION_FAILED; break; case NVGRAPH_ERR_IO: ret = NVGRAPH_STATUS_INTERNAL_ERROR; break; case NVGRAPH_ERR_NOT_IMPLEMENTED: ret = NVGRAPH_STATUS_INVALID_VALUE; break; case NVGRAPH_ERR_NO_MEMORY: ret 
= NVGRAPH_STATUS_ALLOC_FAILED; break; case NVGRAPH_ERR_NOT_CONVERGED: ret = NVGRAPH_STATUS_NOT_CONVERGED; break; default: ret = NVGRAPH_STATUS_INTERNAL_ERROR; } return ret; } extern "C" { const char* nvgraphStatusGetString(nvgraphStatus_t status) { switch (status) { case NVGRAPH_STATUS_SUCCESS: return "Success"; case NVGRAPH_STATUS_NOT_INITIALIZED: return "nvGRAPH not initialized"; case NVGRAPH_STATUS_ALLOC_FAILED: return "nvGRAPH alloc failed"; case NVGRAPH_STATUS_INVALID_VALUE: return "nvGRAPH invalid value"; case NVGRAPH_STATUS_ARCH_MISMATCH: return "nvGRAPH arch mismatch"; case NVGRAPH_STATUS_MAPPING_ERROR: return "nvGRAPH mapping error"; case NVGRAPH_STATUS_EXECUTION_FAILED: return "nvGRAPH execution failed"; case NVGRAPH_STATUS_INTERNAL_ERROR: return "nvGRAPH internal error"; case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED: return "nvGRAPH type not supported"; case NVGRAPH_STATUS_NOT_CONVERGED: return "nvGRAPH algorithm failed to converge"; case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED: return "nvGRAPH graph type not supported"; default: return "Unknown nvGRAPH Status"; } } ; } static nvgraphStatus_t nvgraphCreateMulti_impl(struct nvgraphContext **outCtx, int numDevices, int* _devices) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { int device; CHECK_CUDA(cudaFree((void * )0)); CHECK_CUDA(cudaGetDevice(&device)); struct nvgraphContext *ctx = NULL; ctx = (struct nvgraphContext *) malloc(sizeof(*ctx)); if (!ctx) { FatalError("Cannot allocate NVGRAPH context.", NVGRAPH_ERR_UNKNOWN); } //cnmem memset(&ctx->cnmem_device, 0, sizeof(ctx->cnmem_device)); // init all to 0 ctx->cnmem_device.device = device; // cnmem runs on the device set by cudaSetDevice size_t init_alloc = 1; // Initial allocation tentative, it is currently 1 so this feature is basically disabeled. // Warning : Should uncomment that if using init_alloc > 1 //size_t freeMem, totalMem; //cudaMemGetInfo(&freeMem, &totalMem); //if (freeMem < init_alloc) // Couldn't find enough memory to do the initial alloc // init_alloc = 1; // (0 is used as default parameter in cnmem) ctx->cnmem_device.size = init_alloc; cnmemDevice_t* devices = (cnmemDevice_t*) malloc(sizeof(cnmemDevice_t) * numDevices); memset(devices, 0, sizeof(cnmemDevice_t) * numDevices); for (int i = 0; i < numDevices; i++) { devices[i].device = _devices[i]; devices[i].size = 1; } cnmemStatus_t cm_status = cnmemInit(numDevices, devices, CNMEM_FLAGS_DEFAULT); free(devices); if (cm_status != CNMEM_STATUS_SUCCESS) FatalError("Cannot initialize memory manager.", NVGRAPH_ERR_UNKNOWN); //Cublas and Cusparse nvgraph::Cusparse::get_handle(); nvgraph::Cublas::get_handle(); //others ctx->stream = 0; ctx->nvgraphIsInitialized = true; if (outCtx) { *outCtx = ctx; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } static nvgraphStatus_t nvgraphCreate_impl(struct nvgraphContext **outCtx) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { int device; CHECK_CUDA(cudaFree((void * )0)); CHECK_CUDA(cudaGetDevice(&device)); struct nvgraphContext *ctx = NULL; ctx = (struct nvgraphContext *) malloc(sizeof(*ctx)); if (!ctx) { FatalError("Cannot allocate NVGRAPH context.", NVGRAPH_ERR_UNKNOWN); } //cnmem memset(&ctx->cnmem_device, 0, sizeof(ctx->cnmem_device)); // init all to 0 ctx->cnmem_device.device = device; // cnmem runs on the device set by cudaSetDevice size_t init_alloc = 1; // Initial allocation tentative, it is currently 1 so this feature is basically disabeled. 
// Warning : Should uncomment that if using init_alloc > 1 //size_t freeMem, totalMem; //cudaMemGetInfo(&freeMem, &totalMem); //if (freeMem < init_alloc) // Couldn't find enough memory to do the initial alloc // init_alloc = 1; // (0 is used as default parameter in cnmem) ctx->cnmem_device.size = init_alloc; cnmemStatus_t cm_status = cnmemInit(1, &ctx->cnmem_device, CNMEM_FLAGS_DEFAULT); if (cm_status != CNMEM_STATUS_SUCCESS) FatalError("Cannot initialize memory manager.", NVGRAPH_ERR_UNKNOWN); //Cublas and Cusparse nvgraph::Cusparse::get_handle(); nvgraph::Cublas::get_handle(); //others ctx->stream = 0; ctx->nvgraphIsInitialized = true; if (outCtx) { *outCtx = ctx; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } static nvgraphStatus_t nvgraphDestroy_impl(nvgraphHandle_t handle) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle)) FatalError("Cannot initialize memory manager.", NVGRAPH_ERR_NO_MEMORY); //Cublas and Cusparse nvgraph::Cusparse::destroy_handle(); nvgraph::Cublas::destroy_handle(); //cnmem // compiler is complaining, cm_status is not used in release build #ifdef DEBUG cnmemStatus_t cm_status = cnmemFinalize(); if( cm_status != CNMEM_STATUS_SUCCESS ) { CERR() << "Warning: " << cnmemGetErrorString(cm_status) << std::endl; } #else cnmemFinalize(); #endif //others free(handle); } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } static nvgraphStatus_t nvgraphCreateGraphDescr_impl(nvgraphHandle_t handle, struct nvgraphGraphDescr **outGraphDescr) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); struct nvgraphGraphDescr *descrG = NULL; descrG = (struct nvgraphGraphDescr*) malloc(sizeof(*descrG)); if (!descrG) { FatalError("Cannot allocate graph descriptor.", NVGRAPH_ERR_UNKNOWN); } descrG->graphStatus = IS_EMPTY; if (outGraphDescr) { *outGraphDescr = descrG; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } static nvgraphStatus_t nvgraphDestroyGraphDescr_impl(nvgraphHandle_t handle, struct nvgraphGraphDescr *descrG) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG) { if (descrG->TT == NVGRAPH_2D_32I_32I) { switch (descrG->T) { case CUDA_R_32I: { nvgraph::Matrix2d<int32_t, int32_t, int32_t>* m = static_cast<nvgraph::Matrix2d<int32_t, int32_t, int32_t>*>(descrG->graph_handle); delete m; break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } else { switch (descrG->graphStatus) { case IS_EMPTY: { break; } case HAS_TOPOLOGY: { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); delete CSRG; break; } case HAS_VALUES: { if (descrG->T == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); delete MCSRG; } else if (descrG->T == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); delete MCSRG; } else if (descrG->T == CUDA_R_32I) { nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, int>*>(descrG->graph_handle); delete MCSRG; } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; break; } default: return NVGRAPH_STATUS_INVALID_VALUE; } } free(descrG); } else return NVGRAPH_STATUS_INVALID_VALUE; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphSetStream_impl(nvgraphHandle_t handle, 
cudaStream_t stream) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); //CnMem cnmemStatus_t cm_status = cnmemRegisterStream(stream); if (cm_status != CNMEM_STATUS_SUCCESS) return NVGRAPH_STATUS_INTERNAL_ERROR; // nvgraph handle handle->stream = stream; //Cublas and Cusparse nvgraph::Cublas::setStream(stream); nvgraph::Cusparse::setStream(stream); } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphSetGraphStructure_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t TT) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != IS_EMPTY) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (check_ptr(topologyData)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (TT == NVGRAPH_CSR_32 || TT == NVGRAPH_CSC_32) { int v = 0, e = 0, *neighborhood = NULL, *edgedest = NULL; switch (TT) { case NVGRAPH_CSR_32: { nvgraphCSRTopology32I_t t = static_cast<nvgraphCSRTopology32I_t>(topologyData); if (!t->nvertices || !t->nedges || check_ptr(t->source_offsets) || check_ptr(t->destination_indices)) return NVGRAPH_STATUS_INVALID_VALUE; v = t->nvertices; e = t->nedges; neighborhood = t->source_offsets; edgedest = t->destination_indices; break; } case NVGRAPH_CSC_32: { nvgraphCSCTopology32I_t t = static_cast<nvgraphCSCTopology32I_t>(topologyData); if (!t->nvertices || !t->nedges || check_ptr(t->destination_offsets) || check_ptr(t->source_indices)) return NVGRAPH_STATUS_INVALID_VALUE; v = t->nvertices; e = t->nedges; neighborhood = t->destination_offsets; edgedest = t->source_indices; break; } default: return NVGRAPH_STATUS_INVALID_VALUE; } descrG->TT = TT; // Create the internal CSR representation nvgraph::CsrGraph<int> * CSRG = new nvgraph::CsrGraph<int>(v, e, handle->stream); CHECK_CUDA(cudaMemcpy(CSRG->get_raw_row_offsets(), neighborhood, (size_t )((CSRG->get_num_vertices() + 1) * sizeof(int)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(CSRG->get_raw_column_indices(), edgedest, (size_t )((CSRG->get_num_edges()) * sizeof(int)), cudaMemcpyDefault)); // Set the graph handle descrG->graph_handle = CSRG; descrG->graphStatus = HAS_TOPOLOGY; } else if (TT == NVGRAPH_2D_32I_32I) { nvgraph2dCOOTopology32I_t td = static_cast<nvgraph2dCOOTopology32I_t>(topologyData); switch (td->valueType) { case CUDA_R_32I: { if (!td->nvertices || !td->nedges || !td->source_indices || !td->destination_indices || !td->numDevices || !td->devices || !td->blockN) return NVGRAPH_STATUS_INVALID_VALUE; descrG->TT = TT; descrG->graphStatus = HAS_TOPOLOGY; if (td->values) descrG->graphStatus = HAS_VALUES; descrG->T = td->valueType; std::vector<int32_t> devices; for (int32_t i = 0; i < td->numDevices; i++) devices.push_back(td->devices[i]); nvgraph::MatrixDecompositionDescription<int32_t, int32_t> description( td->nvertices, td->blockN, td->nedges, devices); nvgraph::Matrix2d<int32_t, int32_t, int32_t>* m = new nvgraph::Matrix2d<int32_t, int32_t, int32_t>(); *m = nvgraph::COOto2d(description, td->source_indices, td->destination_indices, (int32_t*) td->values); descrG->graph_handle = m; break; } default: { return NVGRAPH_STATUS_INVALID_VALUE; } } } else { return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphAttachGraphStructure_impl(nvgraphHandle_t handle, 
nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t TT) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != IS_EMPTY) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (check_ptr(topologyData)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (TT == NVGRAPH_CSR_32 || TT == NVGRAPH_CSC_32) { int v = 0, e = 0, *neighborhood = NULL, *edgedest = NULL; switch (TT) { case NVGRAPH_CSR_32: { nvgraphCSRTopology32I_t t = static_cast<nvgraphCSRTopology32I_t>(topologyData); if (!t->nvertices || !t->nedges || check_ptr(t->source_offsets) || check_ptr(t->destination_indices)) return NVGRAPH_STATUS_INVALID_VALUE; v = t->nvertices; e = t->nedges; neighborhood = t->source_offsets; edgedest = t->destination_indices; break; } case NVGRAPH_CSC_32: { nvgraphCSCTopology32I_t t = static_cast<nvgraphCSCTopology32I_t>(topologyData); if (!t->nvertices || !t->nedges || check_ptr(t->destination_offsets) || check_ptr(t->source_indices)) return NVGRAPH_STATUS_INVALID_VALUE; v = t->nvertices; e = t->nedges; neighborhood = t->destination_offsets; edgedest = t->source_indices; break; } default: return NVGRAPH_STATUS_INVALID_VALUE; } descrG->TT = TT; // Create the internal CSR representation nvgraph::CsrGraph<int> * CSRG = new nvgraph::CsrGraph<int>(v, e, handle->stream); CSRG->set_raw_row_offsets(neighborhood); CSRG->set_raw_column_indices(edgedest); // Set the graph handle descrG->graph_handle = CSRG; descrG->graphStatus = HAS_TOPOLOGY; } else if (TT == NVGRAPH_2D_32I_32I) { nvgraph2dCOOTopology32I_t td = static_cast<nvgraph2dCOOTopology32I_t>(topologyData); switch (td->valueType) { case CUDA_R_32I: { if (!td->nvertices || !td->nedges || !td->source_indices || !td->destination_indices || !td->numDevices || !td->devices || !td->blockN) return NVGRAPH_STATUS_INVALID_VALUE; descrG->TT = TT; descrG->graphStatus = HAS_TOPOLOGY; if (td->values) descrG->graphStatus = HAS_VALUES; descrG->T = td->valueType; std::vector<int32_t> devices; for (int32_t i = 0; i < td->numDevices; i++) devices.push_back(td->devices[i]); nvgraph::MatrixDecompositionDescription<int32_t, int32_t> description( td->nvertices, td->blockN, td->nedges, devices); nvgraph::Matrix2d<int32_t, int32_t, int32_t>* m = new nvgraph::Matrix2d<int32_t, int32_t, int32_t>(); *m = nvgraph::COOto2d(description, td->source_indices, td->destination_indices, (int32_t*) td->values); descrG->graph_handle = m; break; } default: { return NVGRAPH_STATUS_INVALID_VALUE; } } } else { return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphGetGraphStructure_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t* TT) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_topology(descrG)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); nvgraphTopologyType_t graphTType = descrG->TT; if (TT != NULL) *TT = graphTType; if (topologyData != NULL) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int> *>(descrG->graph_handle); int v = static_cast<int>(CSRG->get_num_vertices()); int e = static_cast<int>(CSRG->get_num_edges()); int *neighborhood = NULL, *edgedest = NULL; switch (graphTType) { case NVGRAPH_CSR_32: { nvgraphCSRTopology32I_t t = static_cast<nvgraphCSRTopology32I_t>(topologyData); t->nvertices = static_cast<int>(v); t->nedges = 
static_cast<int>(e); neighborhood = t->source_offsets; edgedest = t->destination_indices; break; } case NVGRAPH_CSC_32: { nvgraphCSCTopology32I_t t = static_cast<nvgraphCSCTopology32I_t>(topologyData); t->nvertices = static_cast<int>(v); t->nedges = static_cast<int>(e); neighborhood = t->destination_offsets; edgedest = t->source_indices; break; } default: return NVGRAPH_STATUS_INTERNAL_ERROR; } if (neighborhood != NULL) { CHECK_CUDA(cudaMemcpy(neighborhood, CSRG->get_raw_row_offsets(), (size_t )((v + 1) * sizeof(int)), cudaMemcpyDefault)); } if (edgedest != NULL) { CHECK_CUDA(cudaMemcpy(edgedest, CSRG->get_raw_column_indices(), (size_t )((e) * sizeof(int)), cudaMemcpyDefault)); } } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphAllocateVertexData_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t numsets, cudaDataType_t *settypes) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(numsets) || check_ptr(settypes)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (check_uniform_type_array(settypes, numsets)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus == HAS_TOPOLOGY) // need to convert CsrGraph to MultiValuedCsrGraph first { if (*settypes == CUDA_R_32F) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = new nvgraph::MultiValuedCsrGraph< int, float>(*CSRG); descrG->graph_handle = MCSRG; } else if (*settypes == CUDA_R_64F) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = new nvgraph::MultiValuedCsrGraph< int, double>(*CSRG); descrG->graph_handle = MCSRG; } else if (*settypes == CUDA_R_32I) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = new nvgraph::MultiValuedCsrGraph<int, int>(*CSRG); descrG->graph_handle = MCSRG; } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; descrG->T = *settypes; descrG->graphStatus = HAS_VALUES; } else if (descrG->graphStatus == HAS_VALUES) // Already in MultiValuedCsrGraph, just need to check the type { if (*settypes != descrG->T) return NVGRAPH_STATUS_INVALID_VALUE; } else return NVGRAPH_STATUS_INVALID_VALUE; // Allocate and transfer if (*settypes == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); MCSRG->allocateVertexData(numsets, NULL); } else if (*settypes == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); MCSRG->allocateVertexData(numsets, NULL); } else if (*settypes == CUDA_R_32I) { nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, int>*>(descrG->graph_handle); MCSRG->allocateVertexData(numsets, NULL); } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphAttachVertexData_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t setnum, cudaDataType_t settype, void *vertexData) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(setnum)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus == 
HAS_TOPOLOGY) // need to convert CsrGraph to MultiValuedCsrGraph first { if (settype == CUDA_R_32F) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = new nvgraph::MultiValuedCsrGraph< int, float>(*CSRG); descrG->graph_handle = MCSRG; } else if (settype == CUDA_R_64F) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = new nvgraph::MultiValuedCsrGraph< int, double>(*CSRG); descrG->graph_handle = MCSRG; } else if (settype == CUDA_R_32I) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = new nvgraph::MultiValuedCsrGraph<int, int>(*CSRG); descrG->graph_handle = MCSRG; } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; descrG->T = settype; descrG->graphStatus = HAS_VALUES; } else if (descrG->graphStatus == HAS_VALUES) // Already in MultiValuedCsrGraph, just need to check the type { if (settype != descrG->T) return NVGRAPH_STATUS_INVALID_VALUE; } else return NVGRAPH_STATUS_INVALID_VALUE; // transfer if (settype == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); MCSRG->attachVertexData(setnum, (float*)vertexData, NULL); } else if (settype == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); MCSRG->attachVertexData(setnum, (double*)vertexData, NULL); } else if (settype == CUDA_R_32I) { nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, int>*>(descrG->graph_handle); MCSRG->attachVertexData(setnum, (int*)vertexData, NULL); } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphAllocateEdgeData_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t numsets, cudaDataType_t *settypes) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(numsets) || check_ptr(settypes)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (check_uniform_type_array(settypes, numsets)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); // Look at what kind of graph we have if (descrG->graphStatus == HAS_TOPOLOGY) // need to convert CsrGraph to MultiValuedCsrGraph first { if (*settypes == CUDA_R_32F) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = new nvgraph::MultiValuedCsrGraph< int, float>(*CSRG); descrG->graph_handle = MCSRG; } else if (*settypes == CUDA_R_64F) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = new nvgraph::MultiValuedCsrGraph< int, double>(*CSRG); descrG->graph_handle = MCSRG; } else if (*settypes == CUDA_R_32I) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = new nvgraph::MultiValuedCsrGraph<int, int>(*CSRG); descrG->graph_handle = MCSRG; } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; descrG->T = *settypes; descrG->graphStatus = HAS_VALUES; } else if (descrG->graphStatus == HAS_VALUES) // Already in MultiValuedCsrGraph, just need to check the type { if 
(*settypes != descrG->T) return NVGRAPH_STATUS_INVALID_VALUE; } else return NVGRAPH_STATUS_INVALID_VALUE; // Allocate and transfer if (*settypes == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); MCSRG->allocateEdgeData(numsets, NULL); } else if (*settypes == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); MCSRG->allocateEdgeData(numsets, NULL); } else if (*settypes == CUDA_R_32I) { nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, int>*>(descrG->graph_handle); MCSRG->allocateEdgeData(numsets, NULL); } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphAttachEdgeData_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t setnum, cudaDataType_t settype, void *edgeData) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(setnum)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); // Look at what kind of graph we have if (descrG->graphStatus == HAS_TOPOLOGY) // need to convert CsrGraph to MultiValuedCsrGraph first { if (settype == CUDA_R_32F) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = new nvgraph::MultiValuedCsrGraph< int, float>(*CSRG); descrG->graph_handle = MCSRG; } else if (settype == CUDA_R_64F) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = new nvgraph::MultiValuedCsrGraph< int, double>(*CSRG); descrG->graph_handle = MCSRG; } else if (settype == CUDA_R_32I) { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = new nvgraph::MultiValuedCsrGraph<int, int>(*CSRG); descrG->graph_handle = MCSRG; } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; descrG->T = settype; descrG->graphStatus = HAS_VALUES; } else if (descrG->graphStatus == HAS_VALUES) // Already in MultiValuedCsrGraph, just need to check the type { if (settype != descrG->T) return NVGRAPH_STATUS_INVALID_VALUE; } else return NVGRAPH_STATUS_INVALID_VALUE; // Allocate and transfer if (settype == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); MCSRG->attachEdgeData(setnum, (float*)edgeData, NULL); } else if (settype == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); MCSRG->attachEdgeData(setnum, (double*)edgeData, NULL); } else if (settype == CUDA_R_32I) { nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, int>*>(descrG->graph_handle); MCSRG->attachEdgeData(setnum, (int*)edgeData, NULL); } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphSetVertexData_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *vertexData, size_t setnum) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(setnum) || check_ptr(vertexData)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if 
(descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph FatalError("Graph should have allocated values.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->T == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy(MCSRG->get_raw_vertex_dim(setnum), (float*) vertexData, (size_t) ((MCSRG->get_num_vertices()) * sizeof(float)), cudaMemcpyDefault); } else if (descrG->T == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy(MCSRG->get_raw_vertex_dim(setnum), (double*) vertexData, (size_t) ((MCSRG->get_num_vertices()) * sizeof(double)), cudaMemcpyDefault); } else if (descrG->T == CUDA_R_32I) { nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, int>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy(MCSRG->get_raw_vertex_dim(setnum), (int*) vertexData, (size_t) ((MCSRG->get_num_vertices()) * sizeof(int)), cudaMemcpyDefault); } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; cudaCheckError() ; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphGetVertexData_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *vertexData, size_t setnum) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(setnum) || check_ptr(vertexData)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph FatalError("Graph should have values.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->T == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy((float*) vertexData, MCSRG->get_raw_vertex_dim(setnum), (size_t) ((MCSRG->get_num_vertices()) * sizeof(float)), cudaMemcpyDefault); } else if (descrG->T == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy((double*) vertexData, MCSRG->get_raw_vertex_dim(setnum), (size_t) ((MCSRG->get_num_vertices()) * sizeof(double)), cudaMemcpyDefault); } else if (descrG->T == CUDA_R_32I) { nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, int>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy((int*) vertexData, MCSRG->get_raw_vertex_dim(setnum), (size_t) ((MCSRG->get_num_vertices()) * sizeof(int)), cudaMemcpyDefault); } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; cudaCheckError() ; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphConvertTopology_impl(nvgraphHandle_t handle, nvgraphTopologyType_t srcTType, void *srcTopology, void *srcEdgeData, cudaDataType_t *dataType, nvgraphTopologyType_t dstTType, void *dstTopology, void *dstEdgeData) { 
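// Conversion matrix handled below (each source/destination pair gets its own branch):
//   CSR_32 -> CSR_32 (plain copies), CSC_32 (csr2csc), COO_32 (csr2coo, optionally re-sorted);
//   CSC_32 -> CSR_32 (csc2csr), CSC_32 (plain copies), COO_32 (csr2coo on the offsets, optionally re-sorted);
//   COO_32 -> CSR_32 / CSC_32 (coo2csr, cood2csr, coou2csr / coos2csc, coou2csc depending on the tag),
//             COO_32 (copy, or cooSortBySource / cooSortByDestination).
// Edge data must be CUDA_R_32F or CUDA_R_64F. A hypothetical call through the public wrapper
// (argument order mirrors this _impl; all buffers are caller-allocated and d_* names are placeholders):
//   nvgraphCSRTopology32I_st src = { nv, ne, d_src_offsets, d_dst_indices };
//   nvgraphCSCTopology32I_st dst = { 0, 0, d_dst_offsets, d_src_indices };
//   cudaDataType_t t = CUDA_R_32F;
//   nvgraphConvertTopology(handle, NVGRAPH_CSR_32, &src, d_src_weights,
//                          &t, NVGRAPH_CSC_32, &dst, d_dst_weights);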
NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_ptr(dstEdgeData) || check_ptr(srcEdgeData)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); size_t sizeT; if (*dataType == CUDA_R_32F) sizeT = sizeof(float); else if (*dataType == CUDA_R_64F) sizeT = sizeof(double); else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; // Trust me, this better than nested if's. if (srcTType == NVGRAPH_CSR_32 && dstTType == NVGRAPH_CSR_32) { // CSR2CSR nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t>(srcTopology); nvgraphCSRTopology32I_t dstT = static_cast<nvgraphCSRTopology32I_t>(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; CHECK_CUDA(cudaMemcpy(dstT->source_offsets, srcT->source_offsets, (srcT->nvertices + 1) * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstT->destination_indices, srcT->destination_indices, srcT->nedges * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstEdgeData, srcEdgeData, srcT->nedges * sizeT, cudaMemcpyDefault)); } else if (srcTType == NVGRAPH_CSR_32 && dstTType == NVGRAPH_CSC_32) { // CSR2CSC nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t>(srcTopology); nvgraphCSCTopology32I_t dstT = static_cast<nvgraphCSCTopology32I_t>(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; csr2csc(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_offsets, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_offsets, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO, dataType); } else if (srcTType == NVGRAPH_CSR_32 && dstTType == NVGRAPH_COO_32) { // CSR2COO nvgraphCSRTopology32I_t srcT = static_cast<nvgraphCSRTopology32I_t>(srcTopology); nvgraphCOOTopology32I_t dstT = static_cast<nvgraphCOOTopology32I_t>(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if (dstT->tag == NVGRAPH_SORTED_BY_SOURCE || dstT->tag == NVGRAPH_DEFAULT || dstT->tag == NVGRAPH_UNSORTED) { csr2coo(srcT->source_offsets, srcT->nedges, srcT->nvertices, dstT->source_indices, CUSPARSE_INDEX_BASE_ZERO); CHECK_CUDA(cudaMemcpy(dstT->destination_indices, srcT->destination_indices, srcT->nedges * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstEdgeData, srcEdgeData, srcT->nedges * sizeT, cudaMemcpyDefault)); } else if (dstT->tag == NVGRAPH_SORTED_BY_DESTINATION) { // Step 1: Convert to COO_Source csr2coo(srcT->source_offsets, srcT->nedges, srcT->nvertices, dstT->source_indices, CUSPARSE_INDEX_BASE_ZERO); // Step 2: Convert to COO_Destination cooSortByDestination(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, dstT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_indices, CUSPARSE_INDEX_BASE_ZERO, dataType); } else { return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } /////////////////////////////////////////////////////////////////////////////////////////////////////////// } else if (srcTType == NVGRAPH_CSC_32 && dstTType == NVGRAPH_CSR_32) { // CSC2CSR nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t>(srcTopology); nvgraphCSRTopology32I_t dstT = static_cast<nvgraphCSRTopology32I_t>(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; csc2csr(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_offsets, dstEdgeData, dstT->source_offsets, dstT->destination_indices, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO, dataType); } else if (srcTType == NVGRAPH_CSC_32 
&& dstTType == NVGRAPH_CSC_32) { // CSC2CSC nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t>(srcTopology); nvgraphCSCTopology32I_t dstT = static_cast<nvgraphCSCTopology32I_t>(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; CHECK_CUDA(cudaMemcpy(dstT->destination_offsets, srcT->destination_offsets, (srcT->nvertices + 1) * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstT->source_indices, srcT->source_indices, srcT->nedges * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstEdgeData, srcEdgeData, srcT->nedges * sizeT, cudaMemcpyDefault)); } else if (srcTType == NVGRAPH_CSC_32 && dstTType == NVGRAPH_COO_32) { // CSC2COO nvgraphCSCTopology32I_t srcT = static_cast<nvgraphCSCTopology32I_t>(srcTopology); nvgraphCOOTopology32I_t dstT = static_cast<nvgraphCOOTopology32I_t>(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if (dstT->tag == NVGRAPH_SORTED_BY_SOURCE) { // Step 1: Convert to COO_Destination csr2coo(srcT->destination_offsets, srcT->nedges, srcT->nvertices, dstT->destination_indices, CUSPARSE_INDEX_BASE_ZERO); // Step 2: Convert to COO_Source cooSortBySource(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_indices, dstT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_indices, CUSPARSE_INDEX_BASE_ZERO, dataType); } else if (dstT->tag == NVGRAPH_SORTED_BY_DESTINATION || dstT->tag == NVGRAPH_DEFAULT || dstT->tag == NVGRAPH_UNSORTED) { csr2coo(srcT->destination_offsets, srcT->nedges, srcT->nvertices, dstT->destination_indices, CUSPARSE_INDEX_BASE_ZERO); CHECK_CUDA(cudaMemcpy(dstT->source_indices, srcT->source_indices, srcT->nedges * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstEdgeData, srcEdgeData, srcT->nedges * sizeT, cudaMemcpyDefault)); } else { return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } /////////////////////////////////////////////////////////////////////////////////////////////////////////// } else if (srcTType == NVGRAPH_COO_32 && dstTType == NVGRAPH_CSR_32) { // COO2CSR nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t>(srcTopology); nvgraphCSRTopology32I_t dstT = static_cast<nvgraphCSRTopology32I_t>(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if (srcT->tag == NVGRAPH_SORTED_BY_SOURCE) { coo2csr(srcT->source_indices, srcT->nedges, srcT->nvertices, dstT->source_offsets, CUSPARSE_INDEX_BASE_ZERO); CHECK_CUDA(cudaMemcpy(dstT->destination_indices, srcT->destination_indices, srcT->nedges * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstEdgeData, srcEdgeData, srcT->nedges * sizeT, cudaMemcpyDefault)); } else if (srcT->tag == NVGRAPH_SORTED_BY_DESTINATION) { cood2csr(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_offsets, dstT->destination_indices, CUSPARSE_INDEX_BASE_ZERO, dataType); } else if (srcT->tag == NVGRAPH_DEFAULT || srcT->tag == NVGRAPH_UNSORTED) { coou2csr(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_offsets, dstT->destination_indices, CUSPARSE_INDEX_BASE_ZERO, dataType); } else { return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } else if (srcTType == NVGRAPH_COO_32 && dstTType == NVGRAPH_CSC_32) { // COO2CSC nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t>(srcTopology); nvgraphCSCTopology32I_t dstT = static_cast<nvgraphCSCTopology32I_t>(dstTopology); 
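// COO -> CSC: the work required depends on how the source COO is sorted.
// Sorted-by-source input needs a sort by destination plus compression (coos2csc);
// sorted-by-destination input only needs its destination indices compressed into
// offsets, with source indices and edge data copied through; the default/unsorted
// case goes through the full coou2csc path.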
dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if (srcT->tag == NVGRAPH_SORTED_BY_SOURCE) { coos2csc(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_offsets, CUSPARSE_INDEX_BASE_ZERO, dataType); } else if (srcT->tag == NVGRAPH_SORTED_BY_DESTINATION) { coo2csr(srcT->destination_indices, srcT->nedges, srcT->nvertices, dstT->destination_offsets, CUSPARSE_INDEX_BASE_ZERO); CHECK_CUDA(cudaMemcpy(dstT->source_indices, srcT->source_indices, srcT->nedges * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstEdgeData, srcEdgeData, srcT->nedges * sizeT, cudaMemcpyDefault)); } else if (srcT->tag == NVGRAPH_DEFAULT || srcT->tag == NVGRAPH_UNSORTED) { coou2csc(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_offsets, CUSPARSE_INDEX_BASE_ZERO, dataType); } else { return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } else if (srcTType == NVGRAPH_COO_32 && dstTType == NVGRAPH_COO_32) { // COO2COO nvgraphCOOTopology32I_t srcT = static_cast<nvgraphCOOTopology32I_t>(srcTopology); nvgraphCOOTopology32I_t dstT = static_cast<nvgraphCOOTopology32I_t>(dstTopology); dstT->nvertices = srcT->nvertices; dstT->nedges = srcT->nedges; if (srcT->tag == dstT->tag || dstT->tag == NVGRAPH_DEFAULT || dstT->tag == NVGRAPH_UNSORTED) { CHECK_CUDA(cudaMemcpy(dstT->source_indices, srcT->source_indices, srcT->nedges * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstT->destination_indices, srcT->destination_indices, srcT->nedges * sizeof(int), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy(dstEdgeData, srcEdgeData, srcT->nedges * sizeT, cudaMemcpyDefault)); } else if (dstT->tag == NVGRAPH_SORTED_BY_SOURCE) { cooSortBySource(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_indices, CUSPARSE_INDEX_BASE_ZERO, dataType); } else if (dstT->tag == NVGRAPH_SORTED_BY_DESTINATION) { cooSortByDestination(srcT->nvertices, srcT->nvertices, srcT->nedges, srcEdgeData, srcT->source_indices, srcT->destination_indices, dstEdgeData, dstT->source_indices, dstT->destination_indices, CUSPARSE_INDEX_BASE_ZERO, dataType); } else { return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } /////////////////////////////////////////////////////////////////////////////////////////////////////////// } else { return NVGRAPH_STATUS_INVALID_VALUE; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphSetEdgeData_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *edgeData, size_t setnum) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(setnum) || check_ptr(edgeData)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->T == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_edge_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy(MCSRG->get_raw_edge_dim(setnum), (float*) edgeData, (size_t) ((MCSRG->get_num_edges()) * sizeof(float)), cudaMemcpyDefault); } else if (descrG->T == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = 
static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_edge_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy(MCSRG->get_raw_edge_dim(setnum), (double*) edgeData, (size_t) ((MCSRG->get_num_edges()) * sizeof(double)), cudaMemcpyDefault); } else if (descrG->T == CUDA_R_32I) { nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, int>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_edge_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy(MCSRG->get_raw_edge_dim(setnum), (int*) edgeData, (size_t) ((MCSRG->get_num_edges()) * sizeof(int)), cudaMemcpyDefault); } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; cudaCheckError() ; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphGetEdgeData_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *edgeData, size_t setnum) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(setnum) || check_ptr(edgeData)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->T == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_edge_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy((float*) edgeData, MCSRG->get_raw_edge_dim(setnum), (size_t) ((MCSRG->get_num_edges()) * sizeof(float)), cudaMemcpyDefault); } else if (descrG->T == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (setnum >= MCSRG->get_num_edge_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; cudaMemcpy((double*) edgeData, MCSRG->get_raw_edge_dim(setnum), (size_t) ((MCSRG->get_num_edges()) * sizeof(double)), cudaMemcpyDefault); } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; cudaCheckError() ; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphSrSpmv_impl_cub(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t x, const void *beta, const size_t y, const nvgraphSemiring_t SR) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { // some basic checks if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); rc = SemiringAPILauncher(handle, descrG, weight_index, alpha, x, beta, y, SR); } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphSssp_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int *source_vert, const size_t sssp) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index) || check_int_ptr(source_vert)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->TT != NVGRAPH_CSC_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; // cudaError_t cuda_status; if (descrG->graphStatus != HAS_VALUES) return NVGRAPH_STATUS_INVALID_VALUE; switch (descrG->T) { case CUDA_R_32F: { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= 
MCSRG->get_num_edge_dim() || sssp >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; int n = static_cast<int>(MCSRG->get_num_vertices()); nvgraph::Vector<float> co(n, handle->stream); nvgraph::Sssp<int, float> sssp_solver(*MCSRG->get_valued_csr_graph(weight_index)); nvgraph::set_connectivity<int, float>(n, *source_vert, 0.0, FLT_MAX, co.raw()); MCSRG->get_vertex_dim(sssp).copy(co); rc = sssp_solver.solve(*source_vert, co, MCSRG->get_vertex_dim(sssp)); break; } case CUDA_R_64F: { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || sssp >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; int n = static_cast<int>(MCSRG->get_num_vertices()); nvgraph::Vector<double> co(n, handle->stream); nvgraph::Sssp<int, double> sssp_solver(*MCSRG->get_valued_csr_graph(weight_index)); nvgraph::set_connectivity<int, double>(n, *source_vert, 0.0, DBL_MAX, co.raw()); MCSRG->get_vertex_dim(sssp).copy(co); rc = sssp_solver.solve(*source_vert, co, MCSRG->get_vertex_dim(sssp)); break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphTraversal_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const nvgraphTraversal_t traversalT, const int *source_vertex_ptr, const nvgraphTraversalParameter_t params) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_ptr(source_vertex_ptr)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph (storing results) return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSR_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->T != CUDA_R_32I) //results are ints return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; //Results (bfs distances, predecessors..) 
are written in dimension in mvcsrg nvgraph::MultiValuedCsrGraph<int, int> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph< int, int>*>(descrG->graph_handle); // //Computing traversal parameters // size_t distancesIndex, predecessorsIndex, edgeMaskIndex; size_t undirectedFlagParam; size_t alpha_ul, beta_ul; int *distances = NULL, *predecessors = NULL, *edge_mask = NULL; nvgraphTraversalGetDistancesIndex(params, &distancesIndex); nvgraphTraversalGetPredecessorsIndex(params, &predecessorsIndex); nvgraphTraversalGetEdgeMaskIndex(params, &edgeMaskIndex); nvgraphTraversalGetUndirectedFlag(params, &undirectedFlagParam); nvgraphTraversalGetAlpha(params, &alpha_ul); nvgraphTraversalGetBeta(params, &beta_ul); int alpha = static_cast<int>(alpha_ul); int beta = static_cast<int>(beta_ul); //If distances_index was set by user, then use it if (distancesIndex <= MCSRG->get_num_vertex_dim()) { distances = MCSRG->get_vertex_dim(distancesIndex).raw(); } //If predecessors_index was set by user, then use it if (predecessorsIndex <= MCSRG->get_num_vertex_dim()) { predecessors = MCSRG->get_vertex_dim(predecessorsIndex).raw(); } //If edgemask_index was set by user, then use it if (edgeMaskIndex <= MCSRG->get_num_vertex_dim()) { edge_mask = MCSRG->get_edge_dim(edgeMaskIndex).raw(); } int source_vertex = *source_vertex_ptr; int n = static_cast<int>(MCSRG->get_num_vertices()); int nnz = static_cast<int>(MCSRG->get_num_edges()); int *row_offsets = MCSRG->get_raw_row_offsets(); int *col_indices = MCSRG->get_raw_column_indices(); bool undirected = (bool) undirectedFlagParam; if (source_vertex < 0 || source_vertex >= n) { return NVGRAPH_STATUS_INVALID_VALUE; } //Calling corresponding implementation switch (traversalT) { case NVGRAPH_TRAVERSAL_BFS: nvgraph::Bfs<int> bfs_solver(n, nnz, row_offsets, col_indices, !undirected, alpha, beta, handle->stream); //To easily implement multi source with single source, //loop on those two rc = bfs_solver.configure(distances, predecessors, edge_mask); rc = bfs_solver.traverse(source_vertex); break; }; } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } /** * CAPI Method for calling 2d BFS algorithm. * @param handle Nvgraph context handle. * @param descrG Graph handle (must be 2D partitioned) * @param source_vert The source vertex ID * @param distances Pointer to memory allocated to store the distances. * @param predecessors Pointer to memory allocated to store the predecessors * @return Status code. 
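 * @note Only descriptors with NVGRAPH_2D_32I_32I topology and CUDA_R_32I data are accepted;
 *       anything else (including an empty graph) returns NVGRAPH_STATUS_INVALID_VALUE.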
*/ nvgraphStatus_t NVGRAPH_API nvgraph2dBfs_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const int32_t source_vert, int32_t* distances, int32_t* predecessors) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus == IS_EMPTY) return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_2D_32I_32I) return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->T != CUDA_R_32I) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::Matrix2d<int32_t, int32_t, int32_t>* m = static_cast<nvgraph::Matrix2d<int32_t, int32_t, int32_t>*>(descrG->graph_handle); // std::cout << m->toString(); nvgraph::Bfs2d<int32_t, int32_t, int32_t> bfs(m, true, 0, 0); rc = bfs.configure(distances, predecessors); rc = bfs.traverse(source_vert); } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphWidestPath_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int *source_vert, const size_t widest_path) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index) || check_int_ptr(source_vert)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSC_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; // cudaError_t cuda_status; switch (descrG->T) { case CUDA_R_32F: { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || widest_path >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; int n = static_cast<int>(MCSRG->get_num_vertices()); nvgraph::Vector<float> co(n, handle->stream); nvgraph::WidestPath<int, float> widest_path_solver(*MCSRG->get_valued_csr_graph(weight_index)); nvgraph::set_connectivity<int, float>(n, *source_vert, FLT_MAX, -FLT_MAX, co.raw()); MCSRG->get_vertex_dim(widest_path).copy(co); rc = widest_path_solver.solve(*source_vert, co, MCSRG->get_vertex_dim(widest_path)); break; } case CUDA_R_64F: { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || widest_path >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; int n = static_cast<int>(MCSRG->get_num_vertices()); nvgraph::Vector<double> co(n, handle->stream); nvgraph::WidestPath<int, double> widest_path_solver(*MCSRG->get_valued_csr_graph(weight_index)); nvgraph::set_connectivity<int, double>(n, *source_vert, DBL_MAX, -DBL_MAX, co.raw()); MCSRG->get_vertex_dim(widest_path).copy(co); rc = widest_path_solver.solve(*source_vert, co, MCSRG->get_vertex_dim(widest_path)); break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphPagerank_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t bookmark, const int has_guess, const size_t rank, const float tolerance, const int max_iter) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index) || check_ptr(alpha)) FatalError("Incorrect parameters.", 
NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSC_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; if (!(has_guess == 0 || has_guess == 1)) return NVGRAPH_STATUS_INVALID_VALUE; int max_it; float tol; if (max_iter > 0) max_it = max_iter; else max_it = 500; if (tolerance == 0.0f) tol = 1.0E-6f; else if (tolerance < 1.0f && tolerance > 0.0f) tol = tolerance; else return NVGRAPH_STATUS_INVALID_VALUE; switch (descrG->T) { case CUDA_R_32F: { float alphaT = *static_cast<const float*>(alpha); if (alphaT <= 0.0f || alphaT >= 1.0f) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || bookmark >= MCSRG->get_num_vertex_dim() || rank >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; int n = static_cast<int>(MCSRG->get_num_vertices()); nvgraph::Vector<float> guess(n, handle->stream); nvgraph::Vector<float> bm(n, handle->stream); if (has_guess) guess.copy(MCSRG->get_vertex_dim(rank)); else guess.fill(static_cast<float>(1.0 / n)); bm.copy(MCSRG->get_vertex_dim(bookmark)); nvgraph::Pagerank<int, float> pagerank_solver( *MCSRG->get_valued_csr_graph(weight_index), bm); rc = pagerank_solver.solve(alphaT, guess, MCSRG->get_vertex_dim(rank), tol, max_it); break; } case CUDA_R_64F: { double alphaT = *static_cast<const double*>(alpha); if (alphaT <= 0.0 || alphaT >= 1.0) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || bookmark >= MCSRG->get_num_vertex_dim() || rank >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; int n = static_cast<int>(MCSRG->get_num_vertices()); nvgraph::Vector<double> guess(n, handle->stream); nvgraph::Vector<double> bm(n, handle->stream); bm.copy(MCSRG->get_vertex_dim(bookmark)); if (has_guess) guess.copy(MCSRG->get_vertex_dim(rank)); else guess.fill(static_cast<float>(1.0 / n)); nvgraph::Pagerank<int, double> pagerank_solver( *MCSRG->get_valued_csr_graph(weight_index), bm); rc = pagerank_solver.solve(alphaT, guess, MCSRG->get_vertex_dim(rank), tol, max_it); break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphKrylovPagerank_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t bookmark, const float tolerance, const int max_iter, const int subspace_size, const int has_guess, const size_t rank) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index) || check_ptr(alpha)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSC_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; // cudaError_t cuda_status; int max_it; int ss_sz; float tol; if (max_iter > 0) max_it = max_iter; else max_it = 500; if (subspace_size > 0) ss_sz = subspace_size; else ss_sz = 8; if (tolerance == 0.0f) tol = 1.0E-6f; else if (tolerance < 1.0f && tolerance > 0.0f) tol = tolerance; else return 
NVGRAPH_STATUS_INVALID_VALUE; switch (descrG->T) { case CUDA_R_32F: { float alphaT = *static_cast<const float*>(alpha); if (alphaT <= 0.0f || alphaT >= 1.0f) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || bookmark >= MCSRG->get_num_vertex_dim() || rank >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; int n = static_cast<int>(MCSRG->get_num_vertices()); nvgraph::Vector<float> guess(n, handle->stream), eigVals(1, handle->stream); if (has_guess) guess.copy(MCSRG->get_vertex_dim(rank)); else guess.fill(static_cast<float>(1.0 / n)); nvgraph::ImplicitArnoldi<int, float> iram_solver( *MCSRG->get_valued_csr_graph(weight_index), MCSRG->get_vertex_dim(bookmark), tol, max_it, alphaT); rc = iram_solver.solve(ss_sz, 1, guess, eigVals, MCSRG->get_vertex_dim(rank)); break; } case CUDA_R_64F: { // curently iram solver accept float for alpha double alphaTemp = *static_cast<const double*>(alpha); float alphaT = static_cast<float>(alphaTemp); if (alphaT <= 0.0f || alphaT >= 1.0f) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || bookmark >= MCSRG->get_num_vertex_dim() || rank >= MCSRG->get_num_vertex_dim()) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; int n = static_cast<int>(MCSRG->get_num_vertices()); nvgraph::Vector<double> guess(n, handle->stream), eigVals(1, handle->stream); if (has_guess) guess.copy(MCSRG->get_vertex_dim(rank)); else guess.fill(static_cast<float>(1.0 / n)); nvgraph::ImplicitArnoldi<int, double> iram_solver( *MCSRG->get_valued_csr_graph(weight_index), MCSRG->get_vertex_dim(bookmark), tol, max_it, alphaT); rc = iram_solver.solve(ss_sz, 1, guess, eigVals, MCSRG->get_vertex_dim(rank)); break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphExtractSubgraphByVertex_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t subdescrG, int *subvertices, size_t numvertices) { NVGRAPH_ERROR rc = NVGRAPH_OK; typedef int IndexType; try { if (check_context(handle) || check_graph(descrG) || !subdescrG || check_int_size(numvertices) || check_ptr(subvertices)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (!numvertices) return NVGRAPH_STATUS_INVALID_VALUE; subdescrG->TT = descrG->TT; subdescrG->T = descrG->T; switch (descrG->graphStatus) { case HAS_TOPOLOGY: //CsrGraph { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<IndexType>*>(descrG->graph_handle); Graph<IndexType>* subgraph = extract_subgraph_by_vertices(*CSRG, subvertices, numvertices, handle->stream); subdescrG->graph_handle = subgraph; subdescrG->graphStatus = HAS_TOPOLOGY; } break; case HAS_VALUES: //MultiValuedCsrGraph if (descrG->T == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, float>* subgraph = extract_subgraph_by_vertices(*MCSRG, subvertices, numvertices, handle->stream); subdescrG->graph_handle = subgraph; subdescrG->graphStatus = HAS_VALUES; } else if (descrG->T == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, 
double>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, double>* subgraph = extract_subgraph_by_vertices(*MCSRG, subvertices, numvertices, handle->stream); subdescrG->graph_handle = subgraph; subdescrG->graphStatus = HAS_VALUES; } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; break; default: return NVGRAPH_STATUS_INVALID_VALUE; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphExtractSubgraphByEdge_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t subdescrG, int *subedges, size_t numedges) { NVGRAPH_ERROR rc = NVGRAPH_OK; //TODO: extract handle->stream info, from handler/nvgraphContext (?) typedef int IndexType; try { if (check_context(handle) || check_graph(descrG) || !subdescrG || check_int_size(numedges) || check_ptr(subedges)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (!numedges) return NVGRAPH_STATUS_INVALID_VALUE; subdescrG->TT = descrG->TT; subdescrG->T = descrG->T; switch (descrG->graphStatus) { case HAS_TOPOLOGY: //CsrGraph { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); Graph<IndexType>* subgraph = extract_subgraph_by_edges(*CSRG, subedges, numedges, handle->stream); subdescrG->graph_handle = subgraph; subdescrG->graphStatus = HAS_TOPOLOGY; } break; case HAS_VALUES: //MultiValuedCsrGraph if (descrG->T == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, float>* subgraph = extract_subgraph_by_edges(*MCSRG, subedges, numedges, handle->stream); subdescrG->graph_handle = subgraph; subdescrG->graphStatus = HAS_VALUES; } else if (descrG->T == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, double>* subgraph = extract_subgraph_by_edges(*MCSRG, subedges, numedges, handle->stream); subdescrG->graph_handle = subgraph; subdescrG->graphStatus = HAS_VALUES; } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; break; default: return NVGRAPH_STATUS_INVALID_VALUE; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphBalancedCutClustering_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int n_clusters, const int n_eig_vects, const int evs_type, const float evs_tolerance, const int evs_max_iter, const float kmean_tolerance, const int kmean_max_iter, int* clustering, void* eig_vals, void* eig_vects) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSR_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; int evs_max_it, kmean_max_it; int iters_lanczos, iters_kmeans; float evs_tol, kmean_tol; if (evs_max_iter > 0) evs_max_it = evs_max_iter; else evs_max_it = 4000; if (evs_tolerance == 0.0f) evs_tol = 1.0E-3f; else if (evs_tolerance < 1.0f && evs_tolerance > 0.0f) evs_tol = evs_tolerance; else return NVGRAPH_STATUS_INVALID_VALUE; if (kmean_max_iter > 0) kmean_max_it = kmean_max_iter; else kmean_max_it = 200; if (kmean_tolerance == 0.0f) kmean_tol = 1.0E-2f; else if (kmean_tolerance < 1.0f && kmean_tolerance > 0.0f) kmean_tol = 
kmean_tolerance; else return NVGRAPH_STATUS_INVALID_VALUE; if (n_clusters < 2) return NVGRAPH_STATUS_INVALID_VALUE; if (n_eig_vects > n_clusters) return NVGRAPH_STATUS_INVALID_VALUE; if (!(evs_type == 0 || evs_type == 1)) return NVGRAPH_STATUS_INVALID_VALUE; if (clustering == NULL || eig_vals == NULL || eig_vects == NULL) return NVGRAPH_STATUS_INVALID_VALUE; switch (descrG->T) { case CUDA_R_32F: { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || n_clusters > static_cast<int>(MCSRG->get_num_vertices())) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, float> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> clust(MCSRG->get_num_vertices(), handle->stream); Vector<float> eigVals(n_eig_vects, handle->stream); Vector<float> eigVecs(MCSRG->get_num_vertices() * n_eig_vects, handle->stream); if (evs_type == 0) { int restartIter_lanczos = 15 + n_eig_vects; rc = partition<int, float>(network, n_clusters, n_eig_vects, evs_max_it, restartIter_lanczos, evs_tol, kmean_max_it, kmean_tol, clust.raw(), eigVals, eigVecs, iters_lanczos, iters_kmeans); } else { cusolverDnHandle_t cusolverHandle; cusolverDnCreate(&cusolverHandle); rc = partition_lobpcg<int, float>(network, NULL, // preconditioner cusolverHandle, n_clusters, n_eig_vects, evs_max_it, evs_tol, kmean_max_it, kmean_tol, clust.raw(), eigVals, eigVecs, iters_lanczos, iters_kmeans); } // give a copy of results to the user if (rc == NVGRAPH_OK) { CHECK_CUDA(cudaMemcpy((int* )clustering, clust.raw(), (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy((float* )eig_vals, eigVals.raw(), (size_t )(n_eig_vects * sizeof(float)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy((float* )eig_vects, eigVecs.raw(), (size_t )(n_eig_vects * MCSRG->get_num_vertices() * sizeof(float)), cudaMemcpyDefault)); } break; } case CUDA_R_64F: { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || n_clusters > static_cast<int>(MCSRG->get_num_vertices())) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, double> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> clust(MCSRG->get_num_vertices(), handle->stream); Vector<double> eigVals(n_eig_vects, handle->stream); Vector<double> eigVecs(MCSRG->get_num_vertices() * n_eig_vects, handle->stream); if (evs_type == 0) { int restartIter_lanczos = 15 + n_eig_vects; rc = partition<int, double>(network, n_clusters, n_eig_vects, evs_max_it, restartIter_lanczos, evs_tol, kmean_max_it, kmean_tol, clust.raw(), eigVals, eigVecs, iters_lanczos, iters_kmeans); } else { cusolverDnHandle_t cusolverHandle; cusolverDnCreate(&cusolverHandle); rc = partition_lobpcg<int, double>(network, NULL, // preconditioner cusolverHandle, n_clusters, n_eig_vects, evs_max_it, evs_tol, kmean_max_it, kmean_tol, clust.raw(), eigVals, eigVecs, iters_lanczos, iters_kmeans); } // give a copy of results to the user if (rc == NVGRAPH_OK) { CHECK_CUDA(cudaMemcpy((int* )clustering, clust.raw(), (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy((double* )eig_vals, eigVals.raw(), (size_t )(n_eig_vects * sizeof(double)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy((double* )eig_vects, eigVecs.raw(), (size_t )(n_eig_vects * 
MCSRG->get_num_vertices() * sizeof(double)), cudaMemcpyDefault)); } break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeBalancedCut_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int n_clusters, const int* clustering, float * edgeCut, float * ratioCut) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSR_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; if (n_clusters < 2) return NVGRAPH_STATUS_INVALID_VALUE; if (clustering == NULL || edgeCut == NULL || ratioCut == NULL) return NVGRAPH_STATUS_INVALID_VALUE; switch (descrG->T) { case CUDA_R_32F: { float edge_cut, ratio_cut; nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || n_clusters > static_cast<int>(MCSRG->get_num_vertices())) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, float> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> clust(MCSRG->get_num_vertices(), handle->stream); CHECK_CUDA(cudaMemcpy(clust.raw(), (int* )clustering, (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); rc = analyzePartition<int, float>(network, n_clusters, clust.raw(), edge_cut, ratio_cut); *edgeCut = edge_cut; *ratioCut = ratio_cut; break; } case CUDA_R_64F: { double edge_cut, ratio_cut; nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || n_clusters > static_cast<int>(MCSRG->get_num_vertices())) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, double> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> clust(MCSRG->get_num_vertices(), handle->stream); CHECK_CUDA(cudaMemcpy(clust.raw(), (int* )clustering, (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); rc = analyzePartition<int, double>(network, n_clusters, clust.raw(), edge_cut, ratio_cut); *edgeCut = static_cast<float>(edge_cut); *ratioCut = static_cast<float>(ratio_cut); break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphHeavyEdgeMatching_impl( nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const nvgraphEdgeWeightMatching_t similarity_metric, int* aggregates, size_t* num_aggregates) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSR_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; if (aggregates == NULL) return NVGRAPH_STATUS_INVALID_VALUE; Matching_t sim_metric; switch (similarity_metric) { case NVGRAPH_UNSCALED: { sim_metric = USER_PROVIDED; break; } case NVGRAPH_SCALED_BY_ROW_SUM: { sim_metric = SCALED_BY_ROW_SUM; break; } case NVGRAPH_SCALED_BY_DIAGONAL: { sim_metric = 
SCALED_BY_DIAGONAL; break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } switch (descrG->T) { case CUDA_R_32F: { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim()) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, float> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> agg(MCSRG->get_num_vertices(), handle->stream); int num_agg = 0; nvgraph::Size2Selector<int, float> one_phase_hand_checking(sim_metric); rc = one_phase_hand_checking.setAggregates(network, agg, num_agg); *num_aggregates = static_cast<size_t>(num_agg); CHECK_CUDA(cudaMemcpy((int* )aggregates, agg.raw(), (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); break; } case CUDA_R_64F: { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim()) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, double> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> agg(MCSRG->get_num_vertices(), handle->stream); Vector<int> agg_global(MCSRG->get_num_vertices(), handle->stream); int num_agg = 0; nvgraph::Size2Selector<int, double> one_phase_hand_checking(sim_metric); rc = one_phase_hand_checking.setAggregates(network, agg, num_agg); *num_aggregates = static_cast<size_t>(num_agg); CHECK_CUDA(cudaMemcpy((int* )aggregates, agg.raw(), (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphSpectralModularityMaximization_impl( nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int n_clusters, const int n_eig_vects, const float evs_tolerance, const int evs_max_iter, const float kmean_tolerance, const int kmean_max_iter, int* clustering, void* eig_vals, void* eig_vects) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSR_32) // supported topologies return NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED; int evs_max_it, kmean_max_it; int iters_lanczos, iters_kmeans; float evs_tol, kmean_tol; if (evs_max_iter > 0) evs_max_it = evs_max_iter; else evs_max_it = 4000; if (evs_tolerance == 0.0f) evs_tol = 1.0E-3f; else if (evs_tolerance < 1.0f && evs_tolerance > 0.0f) evs_tol = evs_tolerance; else return NVGRAPH_STATUS_INVALID_VALUE; if (kmean_max_iter > 0) kmean_max_it = kmean_max_iter; else kmean_max_it = 200; if (kmean_tolerance == 0.0f) kmean_tol = 1.0E-2f; else if (kmean_tolerance < 1.0f && kmean_tolerance > 0.0f) kmean_tol = kmean_tolerance; else return NVGRAPH_STATUS_INVALID_VALUE; if (n_clusters < 2) return NVGRAPH_STATUS_INVALID_VALUE; if (n_eig_vects > n_clusters) return NVGRAPH_STATUS_INVALID_VALUE; if (clustering == NULL || eig_vals == NULL || eig_vects == NULL) return NVGRAPH_STATUS_INVALID_VALUE; switch (descrG->T) { case CUDA_R_32F: { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || n_clusters > 
static_cast<int>(MCSRG->get_num_vertices())) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, float> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> clust(MCSRG->get_num_vertices(), handle->stream); Vector<float> eigVals(n_eig_vects, handle->stream); Vector<float> eigVecs(MCSRG->get_num_vertices() * n_eig_vects, handle->stream); int restartIter_lanczos = 15 + n_eig_vects; rc = modularity_maximization<int, float>(network, n_clusters, n_eig_vects, evs_max_it, restartIter_lanczos, evs_tol, kmean_max_it, kmean_tol, clust.raw(), eigVals, eigVecs, iters_lanczos, iters_kmeans); // give a copy of results to the user if (rc == NVGRAPH_OK) { CHECK_CUDA(cudaMemcpy((int* )clustering, clust.raw(), (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy((float* )eig_vals, eigVals.raw(), (size_t )(n_eig_vects * sizeof(float)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy((float* )eig_vects, eigVecs.raw(), (size_t )(n_eig_vects * MCSRG->get_num_vertices() * sizeof(float)), cudaMemcpyDefault)); } break; } case CUDA_R_64F: { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || n_clusters > static_cast<int>(MCSRG->get_num_vertices())) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, double> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> clust(MCSRG->get_num_vertices(), handle->stream); Vector<double> eigVals(n_eig_vects, handle->stream); Vector<double> eigVecs(MCSRG->get_num_vertices() * n_eig_vects, handle->stream); int restartIter_lanczos = 15 + n_eig_vects; rc = modularity_maximization<int, double>(network, n_clusters, n_eig_vects, evs_max_it, restartIter_lanczos, evs_tol, kmean_max_it, kmean_tol, clust.raw(), eigVals, eigVecs, iters_lanczos, iters_kmeans); // give a copy of results to the user if (rc == NVGRAPH_OK) { CHECK_CUDA(cudaMemcpy((int* )clustering, clust.raw(), (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy((double* )eig_vals, eigVals.raw(), (size_t )(n_eig_vects * sizeof(double)), cudaMemcpyDefault)); CHECK_CUDA(cudaMemcpy((double* )eig_vects, eigVecs.raw(), (size_t )(n_eig_vects * MCSRG->get_num_vertices() * sizeof(double)), cudaMemcpyDefault)); } break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeModularityClustering_impl( nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int n_clusters, const int* clustering, float * modularity) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_int_size(weight_index)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->graphStatus != HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->TT != NVGRAPH_CSR_32) // supported topologies return NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED; if (n_clusters < 2) return NVGRAPH_STATUS_INVALID_VALUE; if (clustering == NULL || modularity == NULL) return NVGRAPH_STATUS_INVALID_VALUE; switch (descrG->T) { case CUDA_R_32F: { float mod; nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || n_clusters > 
static_cast<int>(MCSRG->get_num_vertices())) return NVGRAPH_STATUS_INVALID_VALUE; nvgraph::ValuedCsrGraph<int, float> network = *MCSRG->get_valued_csr_graph(weight_index); Vector<int> clust(MCSRG->get_num_vertices(), handle->stream); CHECK_CUDA(cudaMemcpy(clust.raw(), (int* )clustering, (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); rc = analyzeModularity<int, float>(network, n_clusters, clust.raw(), mod); *modularity = mod; break; } case CUDA_R_64F: { double mod; nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); if (weight_index >= MCSRG->get_num_edge_dim() || n_clusters > static_cast<int>(MCSRG->get_num_vertices())) // base index is 0 return NVGRAPH_STATUS_INVALID_VALUE; Vector<int> clust(MCSRG->get_num_vertices(), handle->stream); CHECK_CUDA(cudaMemcpy(clust.raw(), (int* )clustering, (size_t )(MCSRG->get_num_vertices() * sizeof(int)), cudaMemcpyDefault)); nvgraph::ValuedCsrGraph<int, double> network = *MCSRG->get_valued_csr_graph(weight_index); rc = analyzeModularity<int, double>(network, n_clusters, clust.raw(), mod); *modularity = static_cast<float>(mod); break; } default: return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } #ifndef NVGRAPH_LIGHT nvgraphStatus_t NVGRAPH_API nvgraphContractGraph_impl(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t contrdescrG, int *aggregates, size_t numaggregates, nvgraphSemiringOps_t VertexCombineOp, nvgraphSemiringOps_t VertexReduceOp, nvgraphSemiringOps_t EdgeCombineOp, nvgraphSemiringOps_t EdgeReduceOp, int flag) //unused, for now { NVGRAPH_ERROR rc = NVGRAPH_OK; typedef int IndexType; try { if (check_context(handle) || check_graph(descrG) || !contrdescrG || check_int_size(numaggregates) || check_ptr(aggregates)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); contrdescrG->TT = descrG->TT; contrdescrG->T = descrG->T; switch (descrG->graphStatus) { case HAS_TOPOLOGY: //CsrGraph { nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<IndexType>*>(descrG->graph_handle); Graph<IndexType>* contracted_graph = NULL; switch (VertexCombineOp) { case NVGRAPH_MULTIPLY: contracted_graph = contract_graph_csr_mul(*CSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_SUM: contracted_graph = contract_graph_csr_sum(*CSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_MIN: contracted_graph = contract_graph_csr_min(*CSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_MAX: contracted_graph = contract_graph_csr_max(*CSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; } contrdescrG->graph_handle = contracted_graph; contrdescrG->graphStatus = HAS_TOPOLOGY; } break; case HAS_VALUES: //MultiValuedCsrGraph if (descrG->T == CUDA_R_32F) { nvgraph::MultiValuedCsrGraph<int, float> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, float>* contracted_graph = NULL; switch (VertexCombineOp) { case NVGRAPH_MULTIPLY: contracted_graph = contract_graph_mv_float_mul(*MCSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_SUM: 
contracted_graph = contract_graph_mv_float_sum(*MCSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_MIN: contracted_graph = contract_graph_mv_float_min(*MCSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_MAX: contracted_graph = contract_graph_mv_float_max(*MCSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; } contrdescrG->graph_handle = contracted_graph; contrdescrG->graphStatus = HAS_VALUES; } else if (descrG->T == CUDA_R_64F) { nvgraph::MultiValuedCsrGraph<int, double> *MCSRG = static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(descrG->graph_handle); nvgraph::MultiValuedCsrGraph<int, double>* contracted_graph = NULL; switch (VertexCombineOp) { case NVGRAPH_MULTIPLY: contracted_graph = contract_graph_mv_double_mul(*MCSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_SUM: contracted_graph = contract_graph_mv_double_sum(*MCSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_MIN: contracted_graph = contract_graph_mv_double_min(*MCSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; case NVGRAPH_MAX: contracted_graph = contract_graph_mv_double_max(*MCSRG, aggregates, numaggregates, handle->stream, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp); break; } contrdescrG->graph_handle = contracted_graph; contrdescrG->graphStatus = HAS_VALUES; } else return NVGRAPH_STATUS_TYPE_NOT_SUPPORTED; break; default: return NVGRAPH_STATUS_INVALID_VALUE; } } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } #endif nvgraphStatus_t NVGRAPH_API nvgraphSpectralClustering_impl(nvgraphHandle_t handle, // nvGRAPH library handle. const nvgraphGraphDescr_t descrG, // nvGRAPH graph descriptor, should contain the connectivity information in NVGRAPH_CSR_32 or NVGRAPH_CSR_32 at least 1 edge set (weights) const size_t weight_index, // Index of the edge set for the weights. 
        const struct SpectralClusteringParameter *params, // parameters, see struct SpectralClusteringParameter
        int* clustering,   // (output) clustering
        void* eig_vals,    // (output) eigenvalues
        void* eig_vects)   // (output) eigenvectors
{
    if (check_ptr(params) || check_ptr(clustering) || check_ptr(eig_vals) || check_ptr(eig_vects))
        FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS);
    if (params->algorithm == NVGRAPH_MODULARITY_MAXIMIZATION)
        return nvgraph::nvgraphSpectralModularityMaximization_impl(handle, descrG, weight_index,
                params->n_clusters, params->n_eig_vects, params->evs_tolerance, params->evs_max_iter,
                params->kmean_tolerance, params->kmean_max_iter, clustering, eig_vals, eig_vects);
    else if (params->algorithm == NVGRAPH_BALANCED_CUT_LANCZOS)
        return nvgraph::nvgraphBalancedCutClustering_impl(handle, descrG, weight_index,
                params->n_clusters, params->n_eig_vects, 0, params->evs_tolerance, params->evs_max_iter,
                params->kmean_tolerance, params->kmean_max_iter, clustering, eig_vals, eig_vects);
    else if (params->algorithm == NVGRAPH_BALANCED_CUT_LOBPCG)
        return nvgraph::nvgraphBalancedCutClustering_impl(handle, descrG, weight_index,
                params->n_clusters, params->n_eig_vects, 1, params->evs_tolerance, params->evs_max_iter,
                params->kmean_tolerance, params->kmean_max_iter, clustering, eig_vals, eig_vects);
    else
        return NVGRAPH_STATUS_INVALID_VALUE;
}

nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeClustering_impl(nvgraphHandle_t handle, // nvGRAPH library handle.
        const nvgraphGraphDescr_t descrG, // nvGRAPH graph descriptor; should contain the connectivity information in NVGRAPH_CSR_32 and at least 1 edge set (weights)
        const size_t weight_index,        // Index of the edge set for the weights.
        const int n_clusters,             // number of clusters
        const int* clustering,            // clustering to analyse
        nvgraphClusteringMetric_t metric, // metric to compute to measure the clustering quality
        float * score)                    // (output) clustering score telling how good the clustering is for the selected metric.
{ if (check_ptr(clustering) || check_ptr(score)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (metric == NVGRAPH_MODULARITY) return nvgraphAnalyzeModularityClustering_impl(handle, descrG, weight_index, n_clusters, clustering, score); else if (metric == NVGRAPH_EDGE_CUT) { float dummy = 0; return nvgraph::nvgraphAnalyzeBalancedCut_impl(handle, descrG, weight_index, n_clusters, clustering, score, &dummy); } else if (metric == NVGRAPH_RATIO_CUT) { float dummy = 0; return nvgraph::nvgraphAnalyzeBalancedCut_impl(handle, descrG, weight_index, n_clusters, clustering, &dummy, score); } else return NVGRAPH_STATUS_INVALID_VALUE; } nvgraphStatus_t NVGRAPH_API nvgraphTriangleCount_impl(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, uint64_t* result) { NVGRAPH_ERROR rc = NVGRAPH_OK; try { if (check_context(handle) || check_graph(descrG) || check_ptr(result)) FatalError("Incorrect parameters.", NVGRAPH_ERR_BAD_PARAMETERS); if (descrG->TT != NVGRAPH_CSR_32 && descrG->TT != NVGRAPH_CSC_32) // supported topologies return NVGRAPH_STATUS_INVALID_VALUE; if (descrG->graphStatus != HAS_TOPOLOGY && descrG->graphStatus != HAS_VALUES) { return NVGRAPH_STATUS_INVALID_VALUE; // should have topology } nvgraph::CsrGraph<int> *CSRG = static_cast<nvgraph::CsrGraph<int>*>(descrG->graph_handle); if (CSRG == NULL) return NVGRAPH_STATUS_MAPPING_ERROR; nvgraph::triangles_counting::TrianglesCount<int> counter(*CSRG); /* stream, device */ rc = counter.count(); uint64_t s_res = counter.get_triangles_count(); *result = static_cast<uint64_t>(s_res); } NVGRAPH_CATCHES(rc) return getCAPIStatusForError(rc); } } /*namespace nvgraph*/ /************************* * API *************************/ nvgraphStatus_t NVGRAPH_API nvgraphGetProperty(libraryPropertyType type, int *value) { switch (type) { case MAJOR_VERSION: *value = CUDART_VERSION / 1000; break; case MINOR_VERSION: *value = (CUDART_VERSION % 1000) / 10; break; case PATCH_LEVEL: *value = 0; break; default: return NVGRAPH_STATUS_INVALID_VALUE; } return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphCreate(nvgraphHandle_t *handle) { return nvgraph::nvgraphCreate_impl(handle); } nvgraphStatus_t NVGRAPH_API nvgraphCreateMulti(nvgraphHandle_t *handle, int numDevices, int* devices) { return nvgraph::nvgraphCreateMulti_impl(handle, numDevices, devices); } nvgraphStatus_t NVGRAPH_API nvgraphDestroy(nvgraphHandle_t handle) { return nvgraph::nvgraphDestroy_impl(handle); } nvgraphStatus_t NVGRAPH_API nvgraphCreateGraphDescr(nvgraphHandle_t handle, nvgraphGraphDescr_t *descrG) { return nvgraph::nvgraphCreateGraphDescr_impl(handle, descrG); } nvgraphStatus_t NVGRAPH_API nvgraphDestroyGraphDescr(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG) { return nvgraph::nvgraphDestroyGraphDescr_impl(handle, descrG); } nvgraphStatus_t NVGRAPH_API nvgraphSetStream(nvgraphHandle_t handle, cudaStream_t stream) { return nvgraph::nvgraphSetStream_impl(handle, stream); } nvgraphStatus_t NVGRAPH_API nvgraphSetGraphStructure(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t topologyType) { return nvgraph::nvgraphSetGraphStructure_impl(handle, descrG, topologyData, topologyType); } nvgraphStatus_t NVGRAPH_API nvgraphGetGraphStructure(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t* topologyType) { return nvgraph::nvgraphGetGraphStructure_impl(handle, descrG, topologyData, topologyType); } nvgraphStatus_t NVGRAPH_API nvgraphAllocateVertexData(nvgraphHandle_t handle, 
nvgraphGraphDescr_t descrG, size_t numsets, cudaDataType_t *settypes) { return nvgraph::nvgraphAllocateVertexData_impl(handle, descrG, numsets, settypes); } nvgraphStatus_t NVGRAPH_API nvgraphAllocateEdgeData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t numsets, cudaDataType_t *settypes) { return nvgraph::nvgraphAllocateEdgeData_impl(handle, descrG, numsets, settypes); } nvgraphStatus_t NVGRAPH_API nvgraphExtractSubgraphByVertex(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t subdescrG, int *subvertices, size_t numvertices) { return nvgraph::nvgraphExtractSubgraphByVertex_impl(handle, descrG, subdescrG, subvertices, numvertices); } nvgraphStatus_t NVGRAPH_API nvgraphExtractSubgraphByEdge(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t subdescrG, int *subedges, size_t numedges) { return nvgraph::nvgraphExtractSubgraphByEdge_impl(handle, descrG, subdescrG, subedges, numedges); } nvgraphStatus_t NVGRAPH_API nvgraphSetVertexData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *vertexData, size_t setnum) { return nvgraph::nvgraphSetVertexData_impl(handle, descrG, vertexData, setnum); } nvgraphStatus_t NVGRAPH_API nvgraphGetVertexData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *vertexData, size_t setnum) { return nvgraph::nvgraphGetVertexData_impl(handle, descrG, vertexData, setnum); } nvgraphStatus_t NVGRAPH_API nvgraphConvertTopology(nvgraphHandle_t handle, nvgraphTopologyType_t srcTType, void *srcTopology, void *srcEdgeData, cudaDataType_t *dataType, nvgraphTopologyType_t dstTType, void *dstTopology, void *dstEdgeData) { return nvgraph::nvgraphConvertTopology_impl(handle, srcTType, srcTopology, srcEdgeData, dataType, dstTType, dstTopology, dstEdgeData); } nvgraphStatus_t NVGRAPH_API nvgraphSetEdgeData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *edgeData, size_t setnum) { return nvgraph::nvgraphSetEdgeData_impl(handle, descrG, edgeData, setnum); } nvgraphStatus_t NVGRAPH_API nvgraphGetEdgeData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *edgeData, size_t setnum) { return nvgraph::nvgraphGetEdgeData_impl(handle, descrG, edgeData, setnum); } nvgraphStatus_t NVGRAPH_API nvgraphSrSpmv(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t x, const void *beta, const size_t y, const nvgraphSemiring_t SR) { return nvgraph::nvgraphSrSpmv_impl_cub(handle, descrG, weight_index, alpha, x, beta, y, SR); } nvgraphStatus_t NVGRAPH_API nvgraphSssp(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int *source_vert, const size_t sssp) { return nvgraph::nvgraphSssp_impl(handle, descrG, weight_index, source_vert, sssp); } //nvgraphTraversal typedef enum { NVGRAPH_TRAVERSAL_DISTANCES_INDEX = 0, NVGRAPH_TRAVERSAL_PREDECESSORS_INDEX = 1, NVGRAPH_TRAVERSAL_MASK_INDEX = 2, NVGRAPH_TRAVERSAL_UNDIRECTED_FLAG_INDEX = 3, NVGRAPH_TRAVERSAL_ALPHA = 4, NVGRAPH_TRAVERSAL_BETA = 5 } nvgraphTraversalParameterIndex_t; nvgraphStatus_t NVGRAPH_API nvgraphTraversalParameterInit(nvgraphTraversalParameter_t *param) { if (check_ptr(param)) return NVGRAPH_STATUS_INVALID_VALUE; param->pad[NVGRAPH_TRAVERSAL_DISTANCES_INDEX] = INT_MAX; param->pad[NVGRAPH_TRAVERSAL_PREDECESSORS_INDEX] = INT_MAX; param->pad[NVGRAPH_TRAVERSAL_MASK_INDEX] = INT_MAX; param->pad[NVGRAPH_TRAVERSAL_UNDIRECTED_FLAG_INDEX] = 0; param->pad[NVGRAPH_TRAVERSAL_ALPHA] = TRAVERSAL_DEFAULT_ALPHA; param->pad[NVGRAPH_TRAVERSAL_BETA] = TRAVERSAL_DEFAULT_BETA; 
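    // The INT_MAX defaults above mark the optional distances/predecessors/mask outputs
    // as unset; nvgraphTraversal_impl only uses an index that falls within the graph's
    // allocated data dimensions.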
return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetDistancesIndex(nvgraphTraversalParameter_t *param, const size_t value) { if (check_ptr(param)) return NVGRAPH_STATUS_INVALID_VALUE; param->pad[NVGRAPH_TRAVERSAL_DISTANCES_INDEX] = value; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetDistancesIndex( const nvgraphTraversalParameter_t param, size_t *value) { if (check_ptr(value)) return NVGRAPH_STATUS_INVALID_VALUE; *value = param.pad[NVGRAPH_TRAVERSAL_DISTANCES_INDEX]; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetPredecessorsIndex(nvgraphTraversalParameter_t *param, const size_t value) { if (check_ptr(param)) return NVGRAPH_STATUS_INVALID_VALUE; param->pad[NVGRAPH_TRAVERSAL_PREDECESSORS_INDEX] = value; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetPredecessorsIndex( const nvgraphTraversalParameter_t param, size_t *value) { if (check_ptr(value)) return NVGRAPH_STATUS_INVALID_VALUE; *value = param.pad[NVGRAPH_TRAVERSAL_PREDECESSORS_INDEX]; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetEdgeMaskIndex(nvgraphTraversalParameter_t *param, const size_t value) { if (check_ptr(param)) return NVGRAPH_STATUS_INVALID_VALUE; param->pad[NVGRAPH_TRAVERSAL_MASK_INDEX] = value; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetEdgeMaskIndex( const nvgraphTraversalParameter_t param, size_t *value) { if (check_ptr(value)) return NVGRAPH_STATUS_INVALID_VALUE; *value = param.pad[NVGRAPH_TRAVERSAL_MASK_INDEX]; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetUndirectedFlag(nvgraphTraversalParameter_t *param, const size_t value) { if (check_ptr(param)) return NVGRAPH_STATUS_INVALID_VALUE; param->pad[NVGRAPH_TRAVERSAL_UNDIRECTED_FLAG_INDEX] = value; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetUndirectedFlag( const nvgraphTraversalParameter_t param, size_t *value) { if (check_ptr(value)) return NVGRAPH_STATUS_INVALID_VALUE; *value = param.pad[NVGRAPH_TRAVERSAL_UNDIRECTED_FLAG_INDEX]; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetAlpha(nvgraphTraversalParameter_t *param, const size_t value) { if (check_ptr(param)) return NVGRAPH_STATUS_INVALID_VALUE; param->pad[NVGRAPH_TRAVERSAL_ALPHA] = value; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetAlpha(const nvgraphTraversalParameter_t param, size_t *value) { if (check_ptr(value)) return NVGRAPH_STATUS_INVALID_VALUE; *value = param.pad[NVGRAPH_TRAVERSAL_ALPHA]; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetBeta(nvgraphTraversalParameter_t *param, const size_t value) { if (check_ptr(param)) return NVGRAPH_STATUS_INVALID_VALUE; param->pad[NVGRAPH_TRAVERSAL_BETA] = value; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetBeta(const nvgraphTraversalParameter_t param, size_t *value) { if (check_ptr(value)) return NVGRAPH_STATUS_INVALID_VALUE; *value = param.pad[NVGRAPH_TRAVERSAL_BETA]; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphTraversal(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const nvgraphTraversal_t traversalT, const int *source_vert, const nvgraphTraversalParameter_t params) { return nvgraph::nvgraphTraversal_impl(handle, descrG, traversalT, source_vert, params); } /** * CAPI Method for calling 2d BFS algorithm. 
* @param handle Nvgraph context handle. * @param descrG Graph handle (must be 2D partitioned) * @param source_vert The source vertex ID * @param distances Pointer to memory allocated to store the distances. * @param predecessors Pointer to memory allocated to store the predecessors * @return Status code. */ nvgraphStatus_t NVGRAPH_API nvgraph2dBfs(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const int32_t source_vert, int32_t* distances, int32_t* predecessors) { return nvgraph::nvgraph2dBfs_impl(handle, descrG, source_vert, distances, predecessors); } //nvgraphWidestPath nvgraphStatus_t NVGRAPH_API nvgraphWidestPath(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int *source_vert, const size_t widest_path) { return nvgraph::nvgraphWidestPath_impl(handle, descrG, weight_index, source_vert, widest_path); } nvgraphStatus_t NVGRAPH_API nvgraphPagerank(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t bookmark, const int has_guess, const size_t pagerank_index, const float tolerance, const int max_iter) { return nvgraph::nvgraphPagerank_impl(handle, descrG, weight_index, alpha, bookmark, has_guess, pagerank_index, tolerance, max_iter); } nvgraphStatus_t NVGRAPH_API nvgraphKrylovPagerank(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t bookmark, const float tolerance, const int max_iter, const int subspace_size, const int has_guess, const size_t rank) { return nvgraph::nvgraphKrylovPagerank_impl(handle, descrG, weight_index, alpha, bookmark, tolerance, max_iter, subspace_size, has_guess, rank); } nvgraphStatus_t NVGRAPH_API nvgraphBalancedCutClustering(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int n_clusters, const int n_eig_vects, const int evs_type, const float evs_tolerance, const int evs_max_iter, const float kmean_tolerance, const int kmean_max_iter, int* clustering, void* eig_vals, void* eig_vects) { return nvgraph::nvgraphBalancedCutClustering_impl(handle, descrG, weight_index, n_clusters, n_eig_vects, evs_type, evs_tolerance, evs_max_iter, kmean_tolerance, kmean_max_iter, clustering, eig_vals, eig_vects); } nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeBalancedCut(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int n_clusters, const int* clustering, float * edgeCut, float * ratioCut) { return nvgraph::nvgraphAnalyzeBalancedCut_impl(handle, descrG, weight_index, n_clusters, clustering, edgeCut, ratioCut); } nvgraphStatus_t NVGRAPH_API nvgraphHeavyEdgeMatching( nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const nvgraphEdgeWeightMatching_t similarity_metric, int* aggregates, size_t* num_aggregates) { return nvgraph::nvgraphHeavyEdgeMatching_impl(handle, descrG, weight_index, similarity_metric, aggregates, num_aggregates); } nvgraphStatus_t NVGRAPH_API nvgraphSpectralModularityMaximization(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int n_clusters, const int n_eig_vects, const float evs_tolerance, const int evs_max_iter, const float kmean_tolerance, const int kmean_max_iter, int* clustering, void* eig_vals, void* eig_vects) { return nvgraph::nvgraphSpectralModularityMaximization_impl(handle, descrG, weight_index, n_clusters, n_eig_vects, evs_tolerance, evs_max_iter, kmean_tolerance, kmean_max_iter, clustering, eig_vals, eig_vects); } 
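/* Illustrative sketch (not part of the library build): how a client of the C API
 * above might configure and run a BFS traversal. It assumes the graph descriptor
 * already holds a CSR topology plus two int vertex-data sets (set 0 for distances,
 * set 1 for predecessors), and that the traversal enum value NVGRAPH_TRAVERSAL_BFS
 * is declared in nvgraph.h. */
#if 0
static nvgraphStatus_t example_run_bfs(nvgraphHandle_t handle,
                                       nvgraphGraphDescr_t descrG,
                                       int source_vertex)
{
    nvgraphTraversalParameter_t params;
    nvgraphStatus_t st = nvgraphTraversalParameterInit(&params);
    if (st != NVGRAPH_STATUS_SUCCESS) return st;
    nvgraphTraversalSetDistancesIndex(&params, 0);    // write distances into vertex set 0
    nvgraphTraversalSetPredecessorsIndex(&params, 1); // write predecessors into vertex set 1
    nvgraphTraversalSetUndirectedFlag(&params, 1);    // traverse edges in both directions
    return nvgraphTraversal(handle, descrG, NVGRAPH_TRAVERSAL_BFS, &source_vertex, params);
}
#endif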
nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeModularityClustering(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int n_clusters, const int* clustering, float * modularity) { return nvgraph::nvgraphAnalyzeModularityClustering_impl(handle, descrG, weight_index, n_clusters, clustering, modularity); } #ifndef NVGRAPH_LIGHT nvgraphStatus_t NVGRAPH_API nvgraphContractGraph(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t contrdescrG, int *aggregates, size_t numaggregates, nvgraphSemiringOps_t VertexCombineOp, nvgraphSemiringOps_t VertexReduceOp, nvgraphSemiringOps_t EdgeCombineOp, nvgraphSemiringOps_t EdgeReduceOp, int flag) { return nvgraph::nvgraphContractGraph_impl(handle, descrG, contrdescrG, aggregates, numaggregates, VertexCombineOp, VertexReduceOp, EdgeCombineOp, EdgeReduceOp, flag); } #endif nvgraphStatus_t NVGRAPH_API nvgraphSpectralClustering(nvgraphHandle_t handle, // nvGRAPH library handle. const nvgraphGraphDescr_t descrG, // nvGRAPH graph descriptor, should contain the connectivity information in NVGRAPH_CSR_32 or NVGRAPH_CSR_32 at least 1 edge set (weights) const size_t weight_index, // Index of the edge set for the weights. const struct SpectralClusteringParameter *params, //parameters, see struct SpectralClusteringParameter int* clustering, // (output) clustering void* eig_vals, // (output) eigenvalues void* eig_vects) // (output) eigenvectors { return nvgraph::nvgraphSpectralClustering_impl(handle, descrG, weight_index, params, clustering, eig_vals, eig_vects); } nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeClustering(nvgraphHandle_t handle, // nvGRAPH library handle. const nvgraphGraphDescr_t descrG, // nvGRAPH graph descriptor, should contain the connectivity information in NVGRAPH_CSR_32 at least 1 edge set (weights) const size_t weight_index, // Index of the edge set for the weights. const int n_clusters, //number of clusters const int* clustering, // clustering to analyse nvgraphClusteringMetric_t metric, // metric to compute to measure the clustering quality float * score) // (output) clustering score telling how good the clustering is for the selected metric. 
{ return nvgraph::nvgraphAnalyzeClustering_impl(handle, descrG, weight_index, n_clusters, clustering, metric, score); } nvgraphStatus_t NVGRAPH_API nvgraphTriangleCount(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, uint64_t* result) { return nvgraph::nvgraphTriangleCount_impl(handle, descrG, result); } nvgraphStatus_t NVGRAPH_API nvgraphLouvain (cudaDataType_t index_type, cudaDataType_t val_type, const size_t num_vertex, const size_t num_edges, void* csr_ptr, void* csr_ind, void* csr_val, int weighted, int has_init_cluster, void* init_cluster, void* final_modularity, void* best_cluster_vec, void* num_level) { NVLOUVAIN_STATUS status = NVLOUVAIN_OK; if ((csr_ptr == NULL) || (csr_ind == NULL) || ((csr_val == NULL) && (weighted == 1)) || ((init_cluster == NULL) && (has_init_cluster == 1)) || (final_modularity == NULL) || (best_cluster_vec == NULL) || (num_level == NULL)) return NVGRAPH_STATUS_INVALID_VALUE; std::ostream log(0); bool weighted_b = weighted; bool has_init_cluster_b = has_init_cluster; if (val_type == CUDA_R_32F) status = nvlouvain::louvain ((int*)csr_ptr, (int*)csr_ind, (float*)csr_val, num_vertex, num_edges, weighted_b, has_init_cluster_b, (int*)init_cluster, *((float*)final_modularity), (int*)best_cluster_vec,*((int*)num_level), log); else status = nvlouvain::louvain ((int*)csr_ptr, (int*)csr_ind, (double*)csr_val, num_vertex, num_edges, weighted_b, has_init_cluster_b, (int*)init_cluster, *((double*)final_modularity), (int*)best_cluster_vec,*((int*)num_level), log); if (status != NVLOUVAIN_OK) return NVGRAPH_STATUS_INTERNAL_ERROR; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphJaccard (cudaDataType_t index_type, cudaDataType_t val_type, const size_t n, const size_t e, void* csr_ptr, void* csr_ind, void* csr_val, int weighted, void* v, void* gamma, void* weight_j) { int status = 0; if ((csr_ptr == NULL) || (csr_ind == NULL) || ((csr_val == NULL) && (weighted == 1)) || (gamma == NULL) || (weight_j == NULL)) return NVGRAPH_STATUS_INVALID_VALUE; bool weighted_b = weighted; if (val_type == CUDA_R_32F) { float* weight_i = NULL, *weight_s = NULL, *work = NULL; NVG_CUDA_TRY(cudaMalloc ((void**)&weight_i, sizeof(float) * e)); NVG_CUDA_TRY(cudaMalloc ((void**)&weight_s, sizeof(float) * e)); if (weighted_b == true) { NVG_CUDA_TRY(cudaMalloc ((void**)&work, sizeof(float) * n)); status = nvlouvain::jaccard <true> (n, e, (int*) csr_ptr, (int*) csr_ind, (float*) csr_val, (float*) v, work, *((float*) gamma), weight_i, weight_s, (float*)weight_j); NVG_CUDA_TRY(cudaFree (work)); } else { NVG_CUDA_TRY(cudaMalloc ((void**)&work, sizeof(float) * n)); nvlouvain::fill(e, (float*)weight_j, (float)1.0); status = nvlouvain::jaccard <false> (n, e, (int*) csr_ptr, (int*) csr_ind, (float*) csr_val, (float*) v, work, *((float*) gamma), weight_i, weight_s, (float*)weight_j); NVG_CUDA_TRY(cudaFree (work)); } NVG_CUDA_TRY(cudaFree (weight_s)); NVG_CUDA_TRY(cudaFree (weight_i)); } else { double* weight_i = NULL, *weight_s = NULL, *work = NULL; NVG_CUDA_TRY(cudaMalloc ((void**)&weight_i, sizeof(double) * e)); NVG_CUDA_TRY(cudaMalloc ((void**)&weight_s, sizeof(double) * e)); if (weighted_b == true) { NVG_CUDA_TRY(cudaMalloc ((void**)&work, sizeof(double) * n)); status = nvlouvain::jaccard <true> (n, e, (int*) csr_ptr, (int*) csr_ind, (double*) csr_val, (double*) v, work, *((double*) gamma), weight_i, weight_s, (double*)weight_j); NVG_CUDA_TRY(cudaFree (work)); } else { NVG_CUDA_TRY(cudaMalloc ((void**)&work, sizeof(double) * n)); nvlouvain::fill(e, (double*)weight_j, 
(double)1.0); status = nvlouvain::jaccard <false> (n, e, (int*) csr_ptr, (int*) csr_ind, (double*) csr_val, (double*) v, work, *((double*) gamma), weight_i, weight_s, (double*)weight_j); NVG_CUDA_TRY(cudaFree (work)); } NVG_CUDA_TRY(cudaFree (weight_s)); NVG_CUDA_TRY(cudaFree (weight_i)); } if (status != 0) return NVGRAPH_STATUS_INTERNAL_ERROR; return NVGRAPH_STATUS_SUCCESS; } nvgraphStatus_t NVGRAPH_API nvgraphAttachGraphStructure(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t TT) { return nvgraph::nvgraphAttachGraphStructure_impl( handle, descrG, topologyData, TT); } nvgraphStatus_t NVGRAPH_API nvgraphAttachVertexData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t setnum, cudaDataType_t settype, void *vertexData) { return nvgraph::nvgraphAttachVertexData_impl( handle, descrG, setnum, settype, vertexData); } nvgraphStatus_t NVGRAPH_API nvgraphAttachEdgeData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t setnum, cudaDataType_t settype, void *edgeData) { return nvgraph::nvgraphAttachEdgeData_impl( handle, descrG, setnum, settype, edgeData); }
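/* Illustrative sketch (not part of the library build): a possible call to the
 * nvgraphLouvain entry point defined above, for a 32-bit CSR graph with float
 * weights. csr_off/csr_ind/csr_val and best_cluster are assumed to live in device
 * memory, while modularity and num_level are host variables (the implementation
 * above dereferences those two directly on the host). */
#if 0
static nvgraphStatus_t example_run_louvain(size_t num_vertices, size_t num_edges,
                                           int* csr_off, int* csr_ind, float* csr_val,
                                           int* best_cluster /* device, num_vertices ints */)
{
    float modularity = 0.0f; // final modularity reported by the solver
    int   num_level  = 0;    // number of coarsening levels performed
    return nvgraphLouvain(CUDA_R_32I, CUDA_R_32F,
                          num_vertices, num_edges,
                          csr_off, csr_ind, csr_val,
                          1 /* weighted */, 0 /* no initial clustering */, NULL,
                          &modularity, best_cluster, &num_level);
}
#endif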
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/csrmv.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* This file contains the nvgraph generalized implementation of the Duane Merrill's CUB CSRMV using MergePath */ #include "nvgraph_csrmv.hxx" #include "exclusive_kv_scan.hxx" //atomics are included in semiring #include "semiring.hxx" #include "nvgraph_error.hxx" //IMPORTANT: IndexType_ must be a signed integer, long, long long etc. Unsigned int is not supported, since -1 is //used as a flag value namespace nvgraph{ //Calculates SM to be used-add to cpp host file __forceinline__ cudaError_t SmVersion(int &smVersion, int deviceOrdinal) { cudaError_t error = cudaSuccess; //assume sucess and state otherwise if fails condition do { //Find out SM version int major, minor; if (error = cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, deviceOrdinal)) break; if (error = cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, deviceOrdinal)) break; smVersion = 100 * major + 10 * minor; } while(0); return error; } template< int _BLOCK_THREADS, //number of threads per thread block int _ITEMS_PER_THREAD> //number of items per individual thread struct SpmvBlockThread //this is in agent file other template parameters ignoring for now { //set constants enum { BLOCK_THREADS = _BLOCK_THREADS, //number of threads per thread block ITEMS_PER_THREAD = _ITEMS_PER_THREAD, //number of items per thread per tile(tid) of input }; }; //This function calculates the MergePath(load-balancing) for each thread by doing a binary search //along the diagonal template<typename IndexType_> __device__ __forceinline__ void MergePathSearch( IndexType_ diag, IndexType_ *A, //rowoffsets + 1 IndexType_ offset, //counter array IndexType_ A_length, IndexType_ B_length, Coord<IndexType_> &pathCoord) //returned by reference stores the path { IndexType_ splitMin = max(diag - B_length, IndexType_(0)); //must be nonnegative IndexType_ splitMax = min(diag, A_length); //stay in bounds //do binary search along diagonal while (splitMin < splitMax) { IndexType_ splitPivot = (splitMin + splitMax) / 2; //take average integer division-start in middle so can go up or down diagonal if (A[splitPivot] <= diag - splitPivot - 1 + offset) //i+j = diag -1 along cross diag **ignored B //move up A and down B from (i,j) to (i-1,j+1) { splitMin = splitPivot + 1; //increase a in case that it is less clearly before split_min <= split_pivot less than average } else { //move down A and up B splitMax = splitPivot; } } //transform back to array coordinates from cross diagaonl coordinates pathCoord.x = min(splitMin, A_length); //make sure do not go out of bounds; //constraint i + j = k pathCoord.y = diag - splitMin; } //Spmv search kernel that calls merge path and identifies the merge path starting coordinates for each tile template <typename SpmvBlockThread, typename IndexType_, typename ValueType_> __global__ void DeviceSpmvSearchKernel( //calls device function merge path int numMergeTiles, //[input] Number of spmv merge tiles which is the spmv grid size 
Coord<IndexType_> *dTileCoords, //[output] pointer to a temporary array of tile starting coordinates CsrMvParams<IndexType_, ValueType_> spParams) //[input] spmv input parameter with corrdponding needed arrays { //set the constants for the gpu architecture enum { BLOCK_THREADS = SpmvBlockThread::BLOCK_THREADS, ITEMS_PER_THREAD = SpmvBlockThread::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid <= numMergeTiles) //verify within domain { IndexType_ diag = tid * TILE_ITEMS; Coord<IndexType_> tileCoord; //each tid will compute its own tile_coordinate //the above coordinate will be stored in tile_coordinate passed by reference //input row pointer starting at csrRowPtr[1] merge path ignores the 0 entry //the first argument to the counting constructor is the size-nnz and the second argument is where to start countings IndexType_ countStart = 0; //if row pointer is 1 based make sure count starts at 1 instead of 0 MergePathSearch(diag, spParams.csrRowPtr, countStart, spParams.m, spParams.nnz, tileCoord); //store path of thread in array of coordinates dTileCoords[tid] = tileCoord; //stores (y,x) = (i.j) coord of thread computed* } } //Agent sturct with two main inline functions which compute the spmv template< typename SpmvPolicyT, // parameterized SpmvBlockThread tuning policy type as listed above typename IndexType_, //index value of rowOffsets and ColIndices typename ValueType_, //matrix and vector value type typename SemiRingType_, //this follows different semiring structs to be passed depending on the enum bool hasAlpha, //signifies whether the input parameter alpha is 1 in y = alpha*A*x + beta*A*y bool hasBeta> //signifies whether the input parameter beta is 0 struct AgentSpmv { //set constants enum { BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; //we use the return type pair for scanning where the pairs are accumulated segment-value with segemn-index __device__ __forceinline__ KeyValuePair<IndexType_,ValueType_> consumeTile( Coord<IndexType_> tileStartCoord, //this gives the starting coordinate to be determined from the initial mergepath call Coord<IndexType_> tileEndCoord, CsrMvParams<IndexType_, ValueType_> &spParams, SemiRingType_ SR) //pass struct as a const reference { IndexType_ tileNumRows = tileEndCoord.x - tileStartCoord.x; //length(rowOffSets) = numRows + 1 in merge path ignore first element for 1 and so length of path in x-direction gives the exact number of rows IndexType_ tileNnz = tileEndCoord.y - tileStartCoord.y; //number of nonzero goes down path countingITerator is indexed by columnInd and Val array which are of size nnz //load row offsets into shared memory-create shared memory row offset pointer __shared__ IndexType_ smemTileRowPtr[ITEMS_PER_THREAD + TILE_ITEMS + 1]; //copy row offsets into shared memory for accumulating matrix vector dot products in the merge path for (int item = threadIdx.x; item <= tileNumRows; item += BLOCK_THREADS) //index by block_threads that is the number of threads per block //start with rowoffsets at the strat coordinate and corresponding threadId can modiy wd to do a cache wrapper for efficiency later { if ((tileStartCoord.x + item) < spParams.m) //memory protection since already at +1 only go up to m { smemTileRowPtr[item] = spParams.csrRowPtr[tileStartCoord.x + item]; } } //after loading into shared memory we must sync the threads to make sure all complete __syncthreads(); 
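        // Second-level merge-path search: each thread now searches its own diagonal
        // (diag = threadIdx.x * ITEMS_PER_THREAD) against the row offsets cached in
        // shared memory, so every thread owns exactly ITEMS_PER_THREAD consecutive
        // steps of this tile's merge path.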
Coord<IndexType_> threadStartCoord; //call MergePath again on shared memory after using start indices IndexType_ diag = threadIdx.x * ITEMS_PER_THREAD; //compute diagonal //shared memory row pointer has been indexed down to 0 so count offset can start at 0 too //counter iterator starts at current y position IndexType_ countIndId = tileStartCoord.y; MergePathSearch(diag, smemTileRowPtr, //sort list A = row offsets in shared memort countIndId, //sort list B = natural number consecutive counting indices starting index tileNumRows, tileNnz, threadStartCoord); //resulting path is stored in threadStartCoord __syncthreads(); //make sure every thread has completed their diagonal of merge path //Compute the thread's merge path segment to perform the dot product foing down the merge path below in the loop Coord<IndexType_> threadCurrentCoord = threadStartCoord; KeyValuePair<IndexType_, ValueType_> scanSegment[ITEMS_PER_THREAD]; //static array of type key value pairs //initialize each dot product contribution to 0 ValueType_ totalValue; SR.setPlus_ident(totalValue);//initialize to semiring identity for plus operation #pragma unroll //unroll for loop for efficiency for (int item = 0; item < ITEMS_PER_THREAD; ++item) //loop over items belonging to thread along merge path { //go down merge path and sum. when move to right new component of result vector y //countInd is consecutive nonzero natural number array going down the matrix B so //indexed by y whereas rowOffset goes to the move and is A indexed by x countIndId = threadCurrentCoord.y + tileStartCoord.y; //line number problem IndexType_ nnzId = min(countIndId, spParams.nnz - 1); //make sure stay in bounds IndexType_ colIdx = spParams.csrColInd[nnzId]; ValueType_ A_val = spParams.csrVal[nnzId]; //A val //we assume A and x are of the same datatype //recall standard algorithm : y[row] += val[nz]*x[colInd[nnz]] in traditional sparse matrix vector form ValueType_ x_val = spParams.x[colIdx]; //csrColInd[nnzId] //wrapper of x vector could change dependent on the architecture //counter will tell direction to move either right or down since last entry of rowoffsets is the totla number of nonzeros //the counter array keeps track of this if (countIndId < smemTileRowPtr[threadCurrentCoord.x]) //this means less than the number of nonzeros in that row { //move down current row accumulating matrix and vector dot product totalValue = SR.plus(SR.times(A_val, x_val), totalValue); //add binary operation because may change to minus and min rather than + and * //store in key value pair scanSegment[item].key = tileNumRows; scanSegment[item].value = totalValue; ++threadCurrentCoord.y; } else //move right to new row and reset {//added in else if condition scanSegment[item].key = threadCurrentCoord.x; scanSegment[item].value = totalValue; //store current without adding new and set to 0 for new row SR.setPlus_ident(totalValue);//0.0;//SR.times_null; ++threadCurrentCoord.x; } } __syncthreads(); //now each thread block has their matrix vector multiplication and we must do a blockwide reduction //Block-wide reduce-value-by-segment KeyValuePair<IndexType_, ValueType_> scanItem, tileCarry; //this is the key value pair that we will be returning scanItem.key = threadCurrentCoord.x; //added min in other version had min with num rows scanItem.value = totalValue; PrefixSum<IndexType_, ValueType_, SemiRingType_, BLOCK_THREADS>(SR).ExclusiveKeyValueScan(scanItem, tileCarry); if (tileNumRows > 0) { if (threadIdx.x == 0) scanItem.key = -1; //can be negative imp to be int rather than 
unsigned int //do a direct scatter #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { if (scanSegment[item].key < tileNumRows) //scanSegment is an array of key value pairs { if (scanItem.key == scanSegment[item].key) { scanSegment[item].value = SR.plus(scanItem.value, scanSegment[item].value); } if (hasAlpha){ //boolean set to 1 need to multiply Ax by alpha as stored in spParams scanSegment[item].value = SR.times(spParams.alpha, scanSegment[item].value); } //check if has beta then need to alter y the right hand side is multiplied by beta if (hasBeta) { //y = alpha*A*x + beta*y ValueType_ y_val = spParams.y[tileStartCoord.x + scanSegment[item].key]; //currentxcoord is stored in the key and this will give corresponding and desired row entry in y scanSegment[item].value = SR.plus(SR.times(spParams.beta, y_val), scanSegment[item].value); } //Set the output vector row element spParams.y[tileStartCoord.x + scanSegment[item].key] = scanSegment[item].value; //disjoint keys } } } //Return the til'es running carry-out key value pair return tileCarry; //will come from exclusive scan } //overload consumetile function for the one in the interafce which will be called by the dispatch function __device__ __forceinline__ void consumeTile ( Coord<IndexType_> *dTileCoords, //pointer to the temporary array of tile starting cooordinates IndexType_ *dTileCarryKeys, //output pointer to temporary array carry-out dot product row-ids, one per block ValueType_ *dTileCarryValues, //output pointer to temporary array carry-out dot product row-ids, one per block int numMergeTiles, //number of merge tiles CsrMvParams<IndexType_, ValueType_> spParams, SemiRingType_ SR) { int tid = (blockIdx.x * gridDim.y) + blockIdx.y; //curent tile index //only continue if tid is in proper range if (tid >= numMergeTiles) return; Coord<IndexType_> tileStartCoord = dTileCoords[tid]; //+0 ignored Coord<IndexType_> tileEndCoord = dTileCoords[tid + 1]; //Consume multi-segment tile by calling above consumeTile overloaded function KeyValuePair<IndexType_, ValueType_> tileCarry = consumeTile( tileStartCoord, tileEndCoord, spParams, SR); //output the tile's carry out if (threadIdx.x == 0) { if (hasAlpha) tileCarry.value = SR.times(spParams.alpha, tileCarry.value); tileCarry.key += tileStartCoord.x; if (tileCarry.key < spParams.m) { dTileCarryKeys[tid] = tileCarry.key; dTileCarryValues[tid] = tileCarry.value; } else { // Make sure to reject keys larger than the matrix size directly here. 
// printf("%d %lf\n",tileCarry.key , tileCarry.value); // this patch may be obsolete after the changes related to bug#1754610 dTileCarryKeys[tid] = -1; } } } }; //this device kernel will call the above agent function-ignoring policies for now template < typename SpmvBlockThread, //parameterized spmvpolicy tunign policy type typename IndexType_, //index type either 32 bit or 64 bit integer for rowoffsets of columnindices typename ValueType_, //matrix and vector value type typename SemiRingType_, //this follows different semiring structs to be passed depending on the enum bool hasAlpha, //determines where alpha = 1 as above bool hasBeta> //determines whether beta = 0 as above __global__ void DeviceSpmvKernel( //this will call consume tile CsrMvParams<IndexType_, ValueType_> spParams, //pass constant reference to spmv parameters const SemiRingType_ &SR, Coord<IndexType_> *dTileCoords, //input pointer to temporaray array of the tile starting coordinates of each (y,x) = (i,j) pair on the merge path IndexType_ *dTileCarryKeys, //output is a pointer to the temp array that carries out the dot porduct row-ids where it is one per block ValueType_ *dTileCarryValues, //output is a pointer to the temp array that carries out the dot porduct row-ids where it is one per block int numTiles //input which is the number of merge tiles ) { //call Spmv agent type specialization- need to fix this call!! //now call cosntructor to initialize and consumeTile to calculate the row dot products AgentSpmv<SpmvBlockThread, IndexType_, ValueType_, SemiRingType_, hasAlpha, hasBeta>().consumeTile( dTileCoords, dTileCarryKeys, dTileCarryValues, numTiles, spParams, SR); } //Helper functions for the reduction by kernel //for block loading block_load_vectorize for SM_30 implemenation from cub //Load linear segment into blocked arrangement across the thread block, guarded by range, //with a fall-back assignment of -1 for out of bound template<int ITEMS_PER_THREAD, typename IndexType_, typename ValueType_> __device__ __forceinline__ void loadDirectBlocked( int linearTid, //input:a asuitable 1d thread-identifier for calling the thread IndexType_ *blockItrKeys, //input: thread block's base input iterator for loading from ValueType_ *blockItrValues, //input: thread block's base input iterator for loading from KeyValuePair<IndexType_, ValueType_> (&items)[ITEMS_PER_THREAD], // output:data to load int validItems, //input:Number of valid items to load KeyValuePair<IndexType_, ValueType_> outOfBoundsDefault) //input:Default value to assign to out of bounds items -1 in this case { #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { int offset = (linearTid * ITEMS_PER_THREAD) + item; // changed validItems to validItems-1 for bug#1754610 since it was causing uninitialized memory accesses here items[item].key = (offset < validItems-1) ? blockItrKeys[offset] : outOfBoundsDefault.key; items[item].value = (offset < validItems-1) ? 
blockItrValues[offset] : outOfBoundsDefault.value; } } //load linear segment of items into a blocked arangement across a thread block template<int ITEMS_PER_THREAD, typename IndexType_, typename ValueType_> __device__ __forceinline__ void loadDirectBlocked( int linearTid, IndexType_ * blockItrKeys, ValueType_ * blockItrValues, KeyValuePair<IndexType_,ValueType_> (&items)[ITEMS_PER_THREAD]) { //Load directly in thread-blocked order #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { items[item].key = blockItrKeys[(linearTid *ITEMS_PER_THREAD) + item]; items[item].value = blockItrValues[(linearTid *ITEMS_PER_THREAD) + item]; } } //This part pertains to the fixup kernel which does a device-wide reduce-value-by-key //for the thread blocks template< typename SpmvPolicyT, // parameterized SpmvBlockThread tuning policy type as listed above typename IndexType_, typename ValueType_, typename SemiRingType_> //matrix and vector value type struct AgentSegmentReduction { //set constants enum { BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; //This function processes an input tile and uses an atomic rewrite strategy template<bool isLastTile> __device__ __forceinline__ void consumeTilePost( IndexType_ *dInKeys, //input array of key value pairs ValueType_ *dInValues, //input array of key value pairs ValueType_ *dAggregatesOut, //output value aggregates into final array y IndexType_ numRemaining, //Number of global input items remaining including this tile IndexType_ tileOffset, //Tile offset SemiRingType_ SR ) { KeyValuePair<IndexType_,ValueType_> pairs[ITEMS_PER_THREAD]; KeyValuePair<IndexType_, ValueType_> outOfBoundsPair; outOfBoundsPair.key = -1; //default value to assign to out of bounds items is set to be -1 int linearTid = threadIdx.x; //load the values into pairs if (isLastTile) { loadDirectBlocked<ITEMS_PER_THREAD, IndexType_, ValueType_> (linearTid, dInKeys + tileOffset, dInValues + tileOffset, pairs, numRemaining, outOfBoundsPair); } else { loadDirectBlocked<ITEMS_PER_THREAD, IndexType_, ValueType_> (linearTid, dInKeys + tileOffset, dInValues + tileOffset, pairs); } #pragma unroll for (int item = 1; item < ITEMS_PER_THREAD; ++item) { ValueType_ *dScatter = dAggregatesOut + pairs[item-1].key; //write to correct row using the key if (pairs[item].key != pairs[item-1].key) { SR.atomicPlus(dScatter, pairs[item -1].value); } else pairs[item].value = SR.plus(pairs[item -1].value, pairs[item].value); //the operation is SUm } // Write out last item if it is valid by checking last key boolean. // pairs[ITEMS_PER_THREAD - 1].key = -1 for out bound elements. 
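        // The atomic add below is required because the same output row of
        // dAggregatesOut can receive partial sums from several threads (and, for
        // segments that span tile boundaries, from several blocks).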
ValueType_ *dScatter = dAggregatesOut + pairs[ITEMS_PER_THREAD - 1].key; if ((!isLastTile || pairs[ITEMS_PER_THREAD - 1].key >= 0)) { //printf("hello %d %lf\n", pairs[ITEMS_PER_THREAD - 1].key , pairs[ITEMS_PER_THREAD -1].value); SR.atomicPlus(dScatter, pairs[ITEMS_PER_THREAD -1].value); } } //this function will call consumeTilePost and it scans the tiles of items as a part of a dynamic chained scan __device__ __forceinline__ void consumeRange( IndexType_ *dKeysIn, //input array of key value pairs ValueType_ *dValuesIn, //input array of key value pairs ValueType_ *dAggregatesOut, //output value aggregates into final array y int numItems, //totall number of input items int numTiles, //total number of input tiles SemiRingType_ SR) { //Blocks are launched in increasing order, so we assign one tile per block int tileIdx = (blockIdx.x * gridDim.y) + blockIdx.y; //current tile index same as in consumeTile IndexType_ tileOffset = tileIdx * TILE_ITEMS; //Global offset for the current tile IndexType_ numRemaining = numItems - tileOffset; //Remaining items which includes this tile if (numRemaining > TILE_ITEMS) //this is not the last tile so call wit template argument set to be false consumeTilePost<false>(dKeysIn, dValuesIn, dAggregatesOut, numRemaining,tileOffset, SR); else if (numRemaining > 0) //this is the last tile which could be possibly partially full consumeTilePost<true>(dKeysIn, dValuesIn, dAggregatesOut, numRemaining,tileOffset, SR); } }; //Blockwide reduction by key final kernel template < typename SpmvBlockThreadSegment, //parameterized spmvpolicy tuning policy type typename IndexType_, typename ValueType_, typename SemiRingType_> __global__ void DeviceSegmentReductionByKeyKernel( //this will call consume tile IndexType_ *dKeysIn, //input pointer to the arry of dot product carried out by row-ids, one per spmv block ValueType_ *dValuesIn, //input pointer to the arry of dot product carried out by row-ids, one per spmv block ValueType_ *dAggregatesOut, //output value aggregates - will be y-final output of method IndexType_ numItems, // total number of items to select int numTiles, //total number of tiles for the entire problem SemiRingType_ SR) { //now call cosntructor to initialize and consumeTile to calculate the row dot products AgentSegmentReduction<SpmvBlockThreadSegment, IndexType_, ValueType_, SemiRingType_>().consumeRange( dKeysIn, dValuesIn, dAggregatesOut, numItems, numTiles, SR); } template<typename IndexType_, typename ValueType_, typename SemiRingType_, bool hasAlpha, bool hasBeta> //matrix and vector value type //this is setting all the grid parameters and size struct DispatchSpmv { //declare constants enum { INIT_KERNEL_THREADS = 128 }; //sample tuning polic- can add more later //SM30 struct Policy350 //as a sample there are many other policies to follow { typedef SpmvBlockThread< (sizeof(ValueType_) > 4) ? 96 : 128, //for double use 96 threads per block otherwise 128 (sizeof(ValueType_) > 4) ? 
4 : 4 //for double use 4 items per thread otherwise use 7 > SpmvPolicyT;///use instead of PtxPolicy come backa nd use cusparse to determine the architetcure }; struct Policy350Reduction //as a sample there are many other policies to follow { typedef SpmvBlockThread<128,3> SpmvPolicyT; //use instead of PtxPolicy come backa nd use cusparse to determine the architetcure };//for <128,1> 1 item per thread need a reduction by key __forceinline__ static cudaError_t Dispatch(CsrMvParams<IndexType_,ValueType_> spParams, const SemiRingType_ &SR, cudaStream_t stream = 0) { cudaError_t error = cudaSuccess; //could move this block to initkernel fucntion int blockThreads = Policy350::SpmvPolicyT::BLOCK_THREADS; int itemsPerThread = Policy350::SpmvPolicyT::ITEMS_PER_THREAD; int blockThreadsRed = Policy350Reduction::SpmvPolicyT::BLOCK_THREADS; int itemsPerThreadRed = Policy350Reduction::SpmvPolicyT::ITEMS_PER_THREAD; //calculate total number of spmv work items do { //do-while loop condition at end of loop //Get device ordinal int deviceOrdinal, smVersion, smCount, maxDimx; if (error = cudaGetDevice(&deviceOrdinal)) break; //Get device SM version if (error = SmVersion(smVersion, deviceOrdinal)) break; //Get SM count-cudaDeviceGetAttribute is built in cuda function if (error = cudaDeviceGetAttribute(&smCount, cudaDevAttrMultiProcessorCount, deviceOrdinal)) break; //Get max dimension of the grid in the x direction if (error = cudaDeviceGetAttribute(&maxDimx, cudaDevAttrMaxGridDimX, deviceOrdinal)) break; int numMergeItems = spParams.m + spParams.nnz; //total amount of work for one diagonal/thread //Tile sizes of relevant kernels int mergeTileSize = blockThreads * itemsPerThread; //for floats this will be a larger number //and since we will be dividing by it less memory allocated for the float case int segmentRedTileSize = blockThreadsRed * itemsPerThreadRed; //Calculate number of tiles for the kernels //need unsigned int to prevent underflow/overflow unsigned int numMergeTiles = (numMergeItems + mergeTileSize - 1) / mergeTileSize; //launch thread number unsigned int numSegmentRedTiles = (numMergeTiles + segmentRedTileSize - 1) / segmentRedTileSize; //int spmv_sm_occupancy ignore maxSmOccupancy function for now and corresponding segmentfixup //get grid dimensions use cuda built in dattetype dim3-has constructor with the 3 arguments dim3 spmvGridSize(min(numMergeTiles, (unsigned int) maxDimx), (numMergeTiles + maxDimx - 1) / maxDimx, //make sure at least 1 1); //2D grid //grid for second kernel dim3 segmentRedGridSize(min(numSegmentRedTiles, (unsigned int) maxDimx), (numSegmentRedTiles + maxDimx -1) / maxDimx, 1); Vector<Coord<IndexType_> > dTileCoords(numMergeTiles + 1, stream); Vector<IndexType_> dTileCarryKeys(numMergeTiles, stream); Vector<ValueType_> dTileCarryValues(numMergeTiles, stream); //Get search grid dimensions int searchBlockSize = INIT_KERNEL_THREADS; int searchGridSize = (numMergeTiles + searchBlockSize) / searchBlockSize; //ignored the +1 -1 //call Search Kernel within the host so need <<>>> //call devicesearch kernel to compute starting coordiantes of merge path DeviceSpmvSearchKernel<typename Policy350::SpmvPolicyT, IndexType_, ValueType_> <<<searchGridSize, searchBlockSize, 0, stream >>>( numMergeTiles, dTileCoords.raw(), spParams); cudaCheckError(); //this will give the starting coordaintes to be called in DeviceSPmvKernel DeviceSpmvKernel<typename Policy350::SpmvPolicyT, IndexType_,ValueType_, SemiRingType_, hasAlpha, hasBeta> <<<spmvGridSize, blockThreads, 0, stream>>>( spParams, SR, 
dTileCoords.raw(), dTileCarryKeys.raw(), dTileCarryValues.raw(), numMergeTiles); cudaCheckError(); //Run reduce by key kernel if necessary //if (error = cudaPeekAtLastError()) break; //check for failure to launch if (numMergeTiles > 1) { DeviceSegmentReductionByKeyKernel<typename Policy350Reduction::SpmvPolicyT, IndexType_, ValueType_, SemiRingType_> <<<segmentRedGridSize, blockThreadsRed, 0>>> (dTileCarryKeys.raw(), dTileCarryValues.raw(), spParams.y, numMergeTiles, numSegmentRedTiles, SR); cudaCheckError(); //if (error = cudaPeekAtLastError()) break; //check for failure to launch of fixup kernel } } while(0); //make sure executes exactly once to give chance to break earlier with errors cudaCheckError(); return error; } }; template<typename IndexType_, typename ValueType_, typename SemiRingType_> cudaError_t callDispatchSpmv(CsrMvParams<IndexType_, ValueType_> &spParams, const SemiRingType_ &SR, cudaStream_t stream = 0) { cudaError_t error; //determine semiring type if (spParams.beta == SR.times_null) { if (spParams.alpha == SR.times_ident) //simply y = A*x error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, false, false>::Dispatch(spParams, SR, stream); //must be on the device else error = DispatchSpmv<IndexType_, ValueType_,SemiRingType_, true, false>::Dispatch(spParams, SR, stream); //must be passed by reference to some since writing } else { if (spParams.alpha == SR.times_ident) error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, false, true>::Dispatch(spParams, SR, stream); else error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, true, true>::Dispatch(spParams, SR, stream); } return error; } template<typename IndexType_, typename ValueType_> cudaError_t callSemiringSpmv(CsrMvParams<IndexType_, ValueType_> &spParams, Semiring SR, cudaStream_t stream = 0) { // This is dangerous but we need to initialize this value, probably it's // better to return success than to return some misleading error code cudaError_t error = cudaSuccess; switch(SR) { case PlusTimes: { PlusTimesSemiring<ValueType_> plustimes; //can be float or double for real case error = callDispatchSpmv(spParams, plustimes, stream); } break; case MinPlus: { MinPlusSemiring<ValueType_> minplus; error = callDispatchSpmv(spParams, minplus, stream); } break; case MaxMin: { MaxMinSemiring<ValueType_> maxmin; error = callDispatchSpmv(spParams, maxmin, stream); } break; case OrAndBool: { OrAndBoolSemiring<ValueType_> orandbool; error = callDispatchSpmv(spParams, orandbool, stream); } break; case LogPlus: { LogPlusSemiring<ValueType_> logplus; error = callDispatchSpmv(spParams, logplus, stream); } break; } return error; } //create a device function interface to call the above dispatch function template <typename IndexType_, typename ValueType_> cudaError_t csrmv_mp( IndexType_ n, IndexType_ m, IndexType_ nnz, ValueType_ alpha, ValueType_ * dValues, //all must be preallocated on the device IndexType_ * dRowOffsets, IndexType_ * dColIndices, ValueType_ *dVectorX, ValueType_ beta, ValueType_ *dVectorY, Semiring SR, cudaStream_t stream) { //create user interface //calling device kernel depends on tempalte boolean parameters fro alpha/beta //Set parameters for struct CsrMvParams<IndexType_, ValueType_> spParams; spParams.m = m; spParams.n = n; spParams.nnz = nnz; spParams.alpha = alpha; spParams.beta = beta; spParams.csrRowPtr = dRowOffsets + 1; //ignore first 0 component in merge path specific for this spmv only spParams.csrVal = dValues; spParams.csrColInd = dColIndices; spParams.x = dVectorX; spParams.y = 
dVectorY; return callSemiringSpmv(spParams, SR, stream); } template<typename IndexType_, typename ValueType_> cudaError_t csrmv_mp( IndexType_ n, IndexType_ m, IndexType_ nnz, ValueType_ alpha, ValuedCsrGraph <IndexType_, ValueType_> network, ValueType_ *dVectorX, ValueType_ beta, ValueType_ *dVectorY, Semiring SR, cudaStream_t stream ) { //calling device kernel depends on tempalte boolean parameters fro alpha/beta //Set parameters for struct CsrMvParams<IndexType_, ValueType_> spParams; spParams.m = m; spParams.n = n; spParams.nnz = nnz; spParams.alpha = alpha; spParams.beta = beta; spParams.csrRowPtr = network.get_raw_row_offsets() + 1; //ignore first 0 component in merge path specific for this spmv only spParams.csrVal = network.get_raw_values(); spParams.csrColInd = network.get_raw_column_indices(); spParams.x = dVectorX; spParams.y = dVectorY; return callSemiringSpmv(spParams, SR, stream); } //declare template types to be called template cudaError_t csrmv_mp<int, double>( int n, int m, int nnz, double alpha, double * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, double *dVectorX, double beta, double *dVectorY, Semiring SR, cudaStream_t stream ); template cudaError_t csrmv_mp<long long, double>( long long n, long long m, long long nnz, double alpha, double * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, double *dVectorX, double beta, double *dVectorY, Semiring SR, cudaStream_t stream ); template cudaError_t csrmv_mp<int, float>( int n, int m, int nnz, float alpha, float * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, float *dVectorX, float beta, float *dVectorY, Semiring SR, cudaStream_t stream ); //for 64 bit support which may not be needed template cudaError_t csrmv_mp<long long, float>( long long n, long long m, long long nnz, float alpha, float * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, float *dVectorX, float beta, float *dVectorY, Semiring SR, cudaStream_t stream ); //assume embedding booleans in the reals /*template cudaError_t csrmv_mp<int, bool>( int n, int m, int nnz, bool alpha, bool * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR ); //for 64 bit support which may not be needed template cudaError_t csrmv_mp<long long, bool>( long long n, long long m, long long nnz, bool alpha, bool * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR );*/ //declare template types to be called using valued_csr_graph version template cudaError_t csrmv_mp<int, double>( int n, int m, int nnz, double alpha, ValuedCsrGraph <int, double> network, double *dVectorX, double beta, double *dVectorY, Semiring SR, cudaStream_t stream ); template cudaError_t csrmv_mp<long long, double>( long long n, long long m, long long nnz, double alpha, ValuedCsrGraph <long long, double> network, double *dVectorX, double beta, double *dVectorY, Semiring SR, cudaStream_t stream ); template cudaError_t csrmv_mp<int, float>( int n, int m, int nnz, float alpha, ValuedCsrGraph <int, float> network, float *dVectorX, float beta, float *dVectorY, Semiring SR, cudaStream_t stream ); //for 64 bit support which may not be needed template cudaError_t csrmv_mp<long long, float>( long long n, long long m, long long nnz, float alpha, 
ValuedCsrGraph <long long, float> network, float *dVectorX, float beta, float *dVectorY, Semiring SR, cudaStream_t stream ); /*template cudaError_t csrmv_mp<int, bool>( int n, int m, int nnz, bool alpha, ValuedCsrGraph <int, bool> network, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR ); //for 64 bit support which may not be needed template cudaError_t csrmv_mp<long long, bool>( long long n, long long m, long long nnz, bool alpha, ValuedCsrGraph <long long, bool> network, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR );*/ } //end namespace nvgraph using namespace nvgraph; //this is the standard kernel used to test the semiring operations template<typename IndexType_, typename ValueType_, typename SemiRingType_> __global__ void csrmv(IndexType_ num_rows, IndexType_ *dRowOffsets, IndexType_ *dColIndices, ValueType_ *dValues, ValueType_ *dVectorX, ValueType_ *dVectorY, SemiRingType_ SR, ValueType_ alpha, ValueType_ beta) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if (row < num_rows) { ValueType_ dot; SR.setPlus_ident(dot); //SR.setPlus_ident(dVectorY[row]); //need to initialize y outside IndexType_ row_start = dRowOffsets[row]; IndexType_ row_end = dRowOffsets[row + 1]; for (int i = row_start; i < row_end; i++) { dot = SR.plus(SR.times(alpha,SR.times(dValues[i], dVectorX[dColIndices[i]])), dot); } dVectorY[row] = SR.plus(dot, (SR.times(beta, dVectorY[row]))); } } template<typename IndexType_, typename ValueType_> void callTestCsrmv(IndexType_ num_rows, IndexType_ *dRowOffsets, IndexType_ *dColIndices, ValueType_ *dValues, ValueType_ *dVectorX, ValueType_ *dVectorY, nvgraph::Semiring SR, ValueType_ alpha, ValueType_ beta) { const int side = 2048; const int numThreads = 256; const int numBlocks = (side * side + numThreads - 1) / numThreads; switch(SR) { case nvgraph::PlusTimes: { nvgraph::PlusTimesSemiring<ValueType_> plustimes; //can be float or double for real case csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, plustimes, alpha, beta); } break; case nvgraph::MinPlus: { nvgraph::MinPlusSemiring<ValueType_> minplus; csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, minplus, alpha, beta); } break; case nvgraph::MaxMin: { nvgraph::MaxMinSemiring<ValueType_> maxmin; csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, maxmin, alpha, beta); } break; case nvgraph::OrAndBool: { nvgraph::OrAndBoolSemiring<ValueType_> orandbool; csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, orandbool, alpha, beta); } break; case nvgraph::LogPlus: { nvgraph::LogPlusSemiring<ValueType_> logplus; csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, logplus, alpha, beta); } break; } cudaCheckError(); } template void callTestCsrmv<int, float>(int num_rows, int *dRowOffsets, int*dColIndices, float *dValues, float *dVectorX, float *dVectorY, nvgraph::Semiring SR, float alpha, float beta); template void callTestCsrmv<int, double>(int num_rows, int *dRowOffsets, int*dColIndices, double *dValues, double *dVectorX, double *dVectorY, nvgraph::Semiring SR, double alpha, double beta);
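/* Illustrative sketch (not part of the library build): calling the merge-path SpMV
 * above on a tiny 3x3 CSR matrix with the conventional (+, *) semiring, i.e.
 * y = alpha*A*x + beta*y. The arrays are made up for the example; the helper only
 * shows the argument order (n, m, nnz, alpha, values, row offsets, column indices,
 * x, beta, y, semiring, stream) expected by csrmv_mp. */
#if 0
static cudaError_t example_plus_times_spmv(cudaStream_t stream)
{
    const int n = 3, m = 3, nnz = 4;
    int   h_off[4] = {0, 1, 3, 4};          // CSR row offsets (m + 1 entries)
    int   h_ind[4] = {1, 0, 2, 1};          // CSR column indices
    float h_val[4] = {2.f, 1.f, 3.f, 4.f};  // CSR values
    float h_x[3]   = {1.f, 1.f, 1.f};
    float h_y[3]   = {0.f, 0.f, 0.f};

    int *d_off, *d_ind; float *d_val, *d_x, *d_y;
    cudaMalloc(&d_off, sizeof(h_off)); cudaMalloc(&d_ind, sizeof(h_ind));
    cudaMalloc(&d_val, sizeof(h_val)); cudaMalloc(&d_x, sizeof(h_x)); cudaMalloc(&d_y, sizeof(h_y));
    cudaMemcpy(d_off, h_off, sizeof(h_off), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ind, h_ind, sizeof(h_ind), cudaMemcpyHostToDevice);
    cudaMemcpy(d_val, h_val, sizeof(h_val), cudaMemcpyHostToDevice);
    cudaMemcpy(d_x,   h_x,   sizeof(h_x),   cudaMemcpyHostToDevice);
    cudaMemcpy(d_y,   h_y,   sizeof(h_y),   cudaMemcpyHostToDevice);

    // y = 1.0 * A * x + 0.0 * y over the PlusTimes semiring
    cudaError_t err = nvgraph::csrmv_mp<int, float>(n, m, nnz, 1.0f,
                                                    d_val, d_off, d_ind,
                                                    d_x, 0.0f, d_y,
                                                    nvgraph::PlusTimes, stream);
    cudaFree(d_off); cudaFree(d_ind); cudaFree(d_val); cudaFree(d_x); cudaFree(d_y);
    return err;
}
#endif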
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/lobpcg.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#if SPECTRAL_USE_LOBPCG #include "lobpcg.hxx" #include <stdio.h> #include <time.h> #include <math.h> #include <cuda.h> #include <cublas_v2.h> #include <cusolverDn.h> #include <cusparse.h> #include <curand.h> //#include "spectral_parameters.h" //#include "cuda_helper.h" //#include "cublas_helper.h" //#include "cusolver_helper.h" //#include "cusparse_helper.h" //#include "curand_helper.h" //#include "magma_helper.h" //#define COLLECT_TIME_STATISTICS 1 #undef COLLECT_TIME_STATISTICS #ifdef COLLECT_TIME_STATISTICS #include <stddef.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/sysinfo.h> #endif static double timer (void) { #ifdef COLLECT_TIME_STATISTICS struct timeval tv; cudaDeviceSynchronize(); gettimeofday(&tv, NULL); return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; #else return 0.0; #endif } namespace nvgraph { template <typename IndexType_, typename ValueType_, bool Device_> static int print_matrix(IndexType_ m, IndexType_ n, ValueType_ * A, IndexType_ lda, const char *s){ IndexType_ i,j; ValueType_ * h_A; if (m > lda) { WARNING("print_matrix - invalid parameter (m > lda)"); return -1; } if (Device_) { h_A = (ValueType_ *)malloc(lda*n*sizeof(ValueType_)); if (!h_A) { WARNING("print_matrix - malloc failed"); return -1; } cudaMemcpy(h_A, A, lda*n*sizeof(ValueType_), cudaMemcpyDeviceToHost); cudaCheckError(); } else { h_A = A; } printf("%s\n",s); for (i=0; i<m; i++) { //assumption m<lda for (j=0; j<n; j++) { printf("%8.5f ", h_A[i+j*lda]); } printf("\n"); } if (Device_) { if (h_A) free(h_A); } return 0; } template <typename IndexType_, typename ValueType_> static __global__ void random_matrix_kernel(IndexType_ m, IndexType_ n, ValueType_ * A, IndexType_ lda, IndexType_ seed) { IndexType_ i,j,index; for (j=threadIdx.y+blockIdx.y*blockDim.y; j<n; j+=blockDim.y*gridDim.y) { for (i=threadIdx.x+blockIdx.x*blockDim.x; i<m; i+=blockDim.x*gridDim.x) { index = i+j*lda; A[index] = ((ValueType_)(((index+seed) % 253)+1))/256.0; } } } template <typename IndexType_, typename ValueType_> int random_matrix(IndexType_ m, IndexType_ n, ValueType_ * A, IndexType_ lda, IndexType_ seed, cudaStream_t s){ if (m > lda) { WARNING("random_matrix - invalid parameter (m > lda)"); return -1; } //device code dim3 gridDim, blockDim; blockDim.x = 256; blockDim.y = 1; blockDim.z = 1; gridDim.x = min((m+blockDim.x-1)/blockDim.x, 65535); gridDim.y = min((n+blockDim.y-1)/blockDim.y, 65535); gridDim.z = 1; random_matrix_kernel<IndexType_,ValueType_><<<gridDim,blockDim,0,s>>>(m,n,A,lda,seed); cudaCheckError(); /* //host code IndexType_ i,j,index; ValueType_ * h_A; h_A = (ValueType_ *)malloc(lda*n*sizeof(ValueType_)); if (!h_A) { WARNING("random_matrix - malloc failed"); return -1; } cudaMemcpy(h_A, A, lda*n*sizeof(ValueType_), cudaMemcpyDeviceToHost); cudaCheckError(); for (i=0; i<m; i++) { for (j=0; j<n; j++) { index = i+j*lda; h_A[index] = ((ValueType_)(((index+seed) % 253)+1))/256.0; //printf("%d, %d, %f, ",index, 
(index+seed) % 253, ((ValueType_)(((index+seed) % 253)+1))/256.0); } printf("\n"); } cudaMemcpy(A, h_A, lda*n*sizeof(ValueType_), cudaMemcpyHostToDevice); cudaCheckError(); */ return 0; } template <typename IndexType_, typename ValueType_> static __global__ void block_axmy_kernel(IndexType_ n, IndexType_ k, ValueType_ * alpha, ValueType_ *X, IndexType_ ldx, ValueType_ *Y, IndexType_ ldy) { IndexType_ i,j,index; for (j=threadIdx.y+blockIdx.y*blockDim.y; j<k; j+=blockDim.y*gridDim.y) { for (i=threadIdx.x+blockIdx.x*blockDim.x; i<n; i+=blockDim.x*gridDim.x) { index = i+j*ldx; Y[index] = Y[index] - alpha[j]*X[index]; } } } template <typename IndexType_, typename ValueType_> int block_axmy(IndexType_ n, IndexType_ k, ValueType_ * alpha, ValueType_ *X, IndexType_ ldx, ValueType_ *Y, IndexType_ ldy, cudaStream_t s) { //device code dim3 gridDim, blockDim; blockDim.x = 256; blockDim.y = 1; blockDim.z = 1; gridDim.x = min((n+blockDim.x-1)/blockDim.x, 65535); gridDim.y = min((k+blockDim.y-1)/blockDim.y, 65535); gridDim.z = 1; block_axmy_kernel<IndexType_,ValueType_><<<gridDim,blockDim,0,s>>>(n,k,alpha,X,ldx,Y,ldy); cudaCheckError(); return 0; } template <typename IndexType_, typename ValueType_> static __global__ void collect_sqrt_kernel(IndexType_ n, ValueType_ *A, IndexType_ lda, ValueType_ *E) { IndexType_ i,index; for (i=threadIdx.x+blockIdx.x*blockDim.x; i<n; i+=blockDim.x*gridDim.x) { index = i+i*lda; E[i] = std::sqrt(static_cast<ValueType_>(A[index])); } } template <typename IndexType_, typename ValueType_> int collect_sqrt_memcpy(IndexType_ n, ValueType_ *A, IndexType_ lda, ValueType_ * E, cudaStream_t s) { //device code dim3 gridDim, blockDim; blockDim.x = min(n,256); blockDim.y = 1; blockDim.z = 1; gridDim.x = min((n+blockDim.x-1)/blockDim.x, 65535); gridDim.y = 1; gridDim.z = 1; collect_sqrt_kernel<IndexType_,ValueType_><<<gridDim,blockDim,0,s>>>(n,A,lda,E); cudaCheckError(); return 0; } template <typename IndexType_, typename ValueType_, bool eigenvecs> static __global__ void convert_to_ascending_order_kernel(IndexType_ n, ValueType_ * H_dst, IndexType_ ldd, ValueType_ * E_dst, ValueType_ * H_src, IndexType_ lds, ValueType_ * E_src){ IndexType_ i,j,indexs,indexd; for (i=threadIdx.x+blockIdx.x*blockDim.x; i<n; i+=blockDim.x*gridDim.x) { E_dst[n-(i+1)] = E_src[i]; } if (eigenvecs) { for (j=threadIdx.y+blockIdx.y*blockDim.y; j<n; j+=blockDim.y*gridDim.y) { for (i=threadIdx.x+blockIdx.x*blockDim.x; i<n; i+=blockDim.x*gridDim.x) { indexs = i+j*lds; indexd = i+(n-(j+1))*ldd; H_dst[indexd] = H_src[indexs]; } } } } template <typename IndexType_, typename ValueType_, bool eigenvecs> int convert_to_ascending_order(IndexType_ n, ValueType_ * H_dst, IndexType_ ldd, ValueType_ * E_dst, ValueType_ * H_src, IndexType_ lds, ValueType_ * E_src, cudaStream_t s){ //device code dim3 gridDim, blockDim; blockDim.x = min(n,256); blockDim.y = (256+blockDim.x-1)/blockDim.x; blockDim.z = 1; gridDim.x = min((n+blockDim.x-1)/blockDim.x, 65535); gridDim.y = min((n+blockDim.y-1)/blockDim.y, 65535); gridDim.z = 1; convert_to_ascending_order_kernel<IndexType_,ValueType_,eigenvecs><<<gridDim,blockDim,0,s>>>(n,H_dst,ldd,E_dst,H_src,lds,E_src); cudaCheckError(); return 0; } template <typename IndexType_, typename ValueType_> static __global__ void compute_cond_kernel (IndexType_ n, ValueType_ *E) { //WARNING: must be launched with a single thread and block only E[0] = E[0]/E[n-1]; } template <typename IndexType_, typename ValueType_> int compute_cond(IndexType_ n, ValueType_ *E, cudaStream_t s) { //device code dim3 
gridDim, blockDim; blockDim.x = 1; blockDim.y = 1; blockDim.z = 1; gridDim.x = 1; gridDim.y = 1; gridDim.z = 1; compute_cond_kernel<IndexType_,ValueType_><<<gridDim,blockDim,0,s>>>(n,E); cudaCheckError(); return 0; } template <typename IndexType_, typename ValueType_> int lobpcg_simplified(cublasHandle_t cublasHandle, cusolverDnHandle_t cusolverHandle, IndexType_ n, IndexType_ k, /*const*/ Matrix<IndexType_,ValueType_> * A, ValueType_ * __restrict__ eigVecs_dev, ValueType_ * __restrict__ eigVals_dev, IndexType_ mit, ValueType_ tol, ValueType_ * __restrict__ work_dev, IndexType_ & iter) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- LaplacianMatrix<IndexType_,ValueType_>* L = dynamic_cast< LaplacianMatrix<IndexType_,ValueType_>* >(A); //LaplacianMatrix<IndexType_,ValueType_>* L = static_cast< LaplacianMatrix<IndexType_,ValueType_>* >(A); cudaEvent_t event=NULL; cudaStream_t s_alg=NULL,s_cublas=NULL,s_cusolver=NULL,s_cusparse=NULL; //cudaStream_t s_magma=NULL; //magma_types.h: typedef cudaStream_t magma_queue_t; // Useful constants const ValueType_ zero = 0.0; const ValueType_ one = 1.0; const ValueType_ mone =-1.0; const bool sp = (sizeof(ValueType_) == 4); const ValueType_ eps = (sp) ? 1.1920929e-7f : 2.220446049250313e-16; const ValueType_ max_kappa= (sp) ? 4 : 8; //const bool use_magma = SPECTRAL_USE_MAGMA; //true; //false; const bool use_throttle = SPECTRAL_USE_THROTTLE; //true; //false; const bool use_normalized_laplacian = SPECTRAL_USE_NORMALIZED_LAPLACIAN; //true; //false; const bool use_R_orthogonalization = SPECTRAL_USE_R_ORTHOGONALIZATION; //true; //false; // Status flags //int minfo; //int nb; //int lwork; //int liwork; int Lwork; int k3 = 3*k; int k2 = 2*k; int sz = k2; //int nb1; //int nb2; //int nb3; ValueType_ kappa; ValueType_ kappa_average; //ValueType_ * h_wa=NULL; //ValueType_ * h_work=NULL; //IndexType_ * h_iwork=NULL; //ValueType_ * h_E=NULL; // Loop indices IndexType_ i,j,start; //LOBPCG subspaces ValueType_ * E=NULL; ValueType_ * Y=NULL; ValueType_ * X=NULL; ValueType_ * R=NULL; ValueType_ * P=NULL; ValueType_ * Z=NULL; ValueType_ * AX=NULL; ValueType_ * AR=NULL; ValueType_ * AP=NULL; ValueType_ * Q=NULL; ValueType_ * BX=NULL; ValueType_ * BR=NULL; ValueType_ * BP=NULL; ValueType_ * G=NULL; ValueType_ * H=NULL; ValueType_ * HU=NULL; ValueType_ * HVT=NULL; ValueType_ * nrmR=NULL; ValueType_ * h_nrmR=NULL; ValueType_ * h_kappa_history=NULL; ValueType_ * Workspace=NULL; double t_start=0.0,t_end=0.0,t_total=0.0,t_setup=0.0,t_mm=0.0,t_bdot=0.0,t_gemm=0.0,t_potrf=0.0,t_trsm=0.0,t_syevd=0.0,t_custom=0.0,t_prec=0.0,t1=0.0,t2=0.0; t_start =timer(); // Random number generator curandGenerator_t randGen; // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(n < 1) { WARNING("lobpcg_simplified - invalid parameter (n<1)"); return -1; } if(k < 1) { WARNING("lobpcg_simplified - invalid parameter (k<1)"); return -1; } if(tol < 0) { WARNING("lobpcg_simplified - invalid parameter (tol<0)"); return -1; } if(k > n) { WARNING("lobpcg_simplified - invalid parameters (k>n)"); return -1; } E = eigVals_dev; //array, not matrix, of eigenvalues Y = &work_dev[0]; //alias Y = [X,R,P] X = &work_dev[0]; //notice that X, R and P must be continuous in memory R = &work_dev[k*n]; //R = A*X-B*X*E P = &work_dev[2*k*n]; Z = &work_dev[3*k*n]; //alias Z = A*Y = [AX,AR,AP] AX= &work_dev[3*k*n]; //track A*X AR= 
&work_dev[4*k*n]; //track A*R (also used as temporary storage) AP= &work_dev[5*k*n]; //track A*P Q = &work_dev[6*k*n]; //alias Q = B*Y = [BX,BR,BP] BX= &work_dev[6*k*n]; //track B*X BR= &work_dev[7*k*n]; //track B*R BP= &work_dev[8*k*n]; //track B*P G = &work_dev[9*k*n]; H = &work_dev[9*k*n + k3*k3]; HU = &work_dev[9*k*n + 2*k3*k3]; HVT = &work_dev[9*k*n + 3*k3*k3]; nrmR= &work_dev[9*k*n + 4*k3*k3]; Workspace = &work_dev[9*k*n + 4*k3*k3+k]; // ------------------------------------------------------- // Variable initialization // ------------------------------------------------------- t1 =timer(); // create a CUDA stream cudaEventCreate(&event); cudaCheckError(); cudaStreamCreate(&s_alg); cudaCheckError(); ///s_alg=NULL; // set pointer mode in CUBLAS CHECK_CUBLAS(cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_HOST)); // save and set streams in CUBLAS and CUSOLVER/MAGMA CHECK_CUBLAS(cublasGetStream(cublasHandle, &s_cublas)); CHECK_CUBLAS(cublasSetStream(cublasHandle, s_alg)); //if (use_magma) { // CHECK_CUBLAS(magmablasGetKernelStream(&s_magma)); //returns cublasStatus_t // CHECK_CUBLAS(magmablasSetKernelStream(s_alg)); //returns cublasStatus_t //} //else { CHECK_CUSOLVER(cusolverDnGetStream(cusolverHandle, &s_cusolver)); CHECK_CUSOLVER(cusolverDnSetStream(cusolverHandle, s_alg)); //} // save and set streams in Laplacian/CUSPARSE L->getCUDAStream(&s_cusparse); L->setCUDAStream(s_alg); // Initialize random number generator CHECK_CURAND(curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(randGen, 123456/*time(NULL)*/)); // Initialize initial LOBPCG subspace CHECK_CURAND(curandGenerateNormalX(randGen, X, k*n, zero, one)); ///random_matrix<IndexType_,ValueType_>(n,k,X,n,17,s_alg); //print_matrix<IndexType_,ValueType_,true>(3,3,X,n,"X"); // set nxk matrices P=0, AP=0 and BP=0 cudaMemsetAsync(P, 0, n*k*sizeof(ValueType_), s_alg); cudaCheckError(); cudaMemsetAsync(AP, 0, n*k*sizeof(ValueType_), s_alg);cudaCheckError(); cudaMemsetAsync(BP, 0, n*k*sizeof(ValueType_), s_alg);cudaCheckError(); //if (use_magma) { // //NB can be obtained through magma_get_dsytrd_nb(N). // //If JOBZ = MagmaVec and N > 1, LWORK >= max( 2*N + N*NB, 1 + 6*N + 2*N**2 ). // //If JOBZ = MagmaVec and N > 1, LIWORK >= 3 + 5*N. 
// nb1 = magma_get_xsytrd_nb(k, zero); // nb2 = magma_get_xsytrd_nb(k2,zero); // nb3 = magma_get_xsytrd_nb(k3,zero); // nb = max(nb1,max(nb2,nb3)); //this is needed to ensure allocations are correct even if sz is changed from k, 2*k to 3*k below // lwork = max(2*k3+k3*nb, 1+6*k3+2*k3*k3); // liwork = 3 + 5*k3; // //printf("k=%d, nb=%d, lwork=%d, liwork=%d\n",k,nb,lwork,liwork); // h_E = (ValueType_ *)malloc(k3*sizeof(h_E[0])); // h_wa = (ValueType_ *)malloc(k3*k3*sizeof(h_wa[0])); // h_work = (ValueType_ *)malloc(lwork*sizeof(h_work[0])); // h_iwork= (IndexType_ *)malloc(liwork*sizeof(h_iwork[0])); // if ((!h_E) || (!h_wa) || (!h_work) || (!h_iwork)) { // WARNING("lobpcg_simplified - malloc failed"); // return -1; // } //} if(use_throttle) { cudaHostAlloc(&h_nrmR, 2*sizeof(h_nrmR[0]), cudaHostAllocDefault); //pinned memory cudaCheckError(); } else{ h_nrmR = (ValueType_ *)malloc((k+1)*sizeof(h_nrmR[0])); } h_kappa_history = (ValueType_ *)malloc((mit+1)*sizeof(h_kappa_history[0])); if ((!h_kappa_history) || (!h_nrmR) ) { WARNING("lobpcg_simplified - malloc/cudaHostAlloc failed"); return -1; } h_kappa_history[0] = -log10(eps)/2.0; //printf("h_kappa_history[0] = %f\n",h_kappa_history[0]); t2 =timer(); t_setup+=t2-t1; // ------------------------------------------------------- // Algorithm // ------------------------------------------------------- //BX= B*X if (use_normalized_laplacian) { L->dm(k, one, X, zero, BX); } else { cudaMemcpyAsync(BX, X, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); } //print_matrix<IndexType_,ValueType_,true>(3,3,BX,n,"BX=B*X"); //G = X'*BX t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, k, k, n, &one, X, n, BX, n, &zero, G, k)); t2 =timer(); t_bdot+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(k,k,G,k,"G=X'*BX"); //S = chol(G); t1 =timer(); //if (false /*use_magma*/) { // MAGMACHECK(magma_xpotrf(k, G, k, &minfo)); //} //else{ CHECK_CUSOLVER(cusolverXpotrf_bufferSize(cusolverHandle,k,G,k,&Lwork)); //Workspace was already over allocated earlier CHECK_CUSOLVER(cusolverXpotrf(cusolverHandle,k,G,k,Workspace,Lwork,(int *)&Workspace[Lwork])); //} t2 =timer(); t_potrf+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(k,k,G,k,"S=chol(G,lower_part_stored)"); //X = X/S (notice that in MATLAB S has L', therefore extra transpose (CUBLAS_OP_T) is required below) t1 =timer(); CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_RIGHT,CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,n,k,&one,G,k, X,n)); //BX=BX/S CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_RIGHT,CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,n,k,&one,G,k,BX,n)); t2 =timer(); t_trsm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,3,X, n,"X = X/S"); //print_matrix<IndexType_,ValueType_,true>(3,3,BX,n,"BX=BX/S"); //AX = A*X t1 =timer(); L->mm(k, one, X, zero, AX); t2 =timer(); t_mm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,3,AX,n,"AX=A*X"); //H = X'*AX t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, k, k, n, &one, X, n, AX, n, &zero, H, k)); t2 =timer(); t_bdot+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(k,k,H,k,"H=X'*A*X"); //[W,E]=eig(H) t1 =timer(); //if (use_magma) { // MAGMACHECK(magma_xsyevd(k, H, k, h_E, h_wa, k, h_work, lwork, h_iwork, liwork, &minfo)); // cudaMemcpy(E, h_E, k*sizeof(ValueType_), cudaMemcpyHostToDevice); cudaCheckError(); //} //else { //WARNING: using eigVecs_dev as a temporary space 
CHECK_CUSOLVER(cusolverXgesvd_bufferSize(cusolverHandle,k,k,H,k,HU,k,HVT,k,&Lwork)); //Workspace was already over allocated earlier CHECK_CUSOLVER(cusolverXgesvd(cusolverHandle,k,k,H,k,eigVecs_dev,HU,k,HVT,k,Workspace,Lwork,NULL,(int *)&Workspace[Lwork])); convert_to_ascending_order<IndexType_,ValueType_,true>(k,H,k,E,HU,k,eigVecs_dev,s_alg); //} t2 =timer(); t_syevd+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(k,1,E,k,"E, from [W,E]=eig(H)"); //print_matrix<IndexType_,ValueType_,true>(k,k,H,k,"W, from [W,E]=eig(H)"); //X = X*W t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k, &one, X, n, H, k, &zero, AR, n)); cudaMemcpyAsync(X, AR, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); //BX = BX*W CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k, &one,BX, n, H, k, &zero, AR, n)); cudaMemcpyAsync(BX,AR, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); //AX = AX*W (notice that R=AX below, which we will use later on when computing residual R) CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k, &one, AX, n, H, k, &zero, R, n)); cudaMemcpyAsync(AX, R, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); t2 =timer(); t_gemm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,3,X, n,"X = X*W"); //print_matrix<IndexType_,ValueType_,true>(3,3,BX,n,"BX=BX*W"); //print_matrix<IndexType_,ValueType_,true>(3,3,AX,n,"AX=AX*W"); // start main loop for(i=0; i<mit; i++){ //save iteration number (an output parameter) iter = i; //R = AX - BX*E t1 =timer(); block_axmy<IndexType_,ValueType_>(n,k,E,BX,n,R,n,s_alg); t2 =timer(); t_custom+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,3,R,n,"R=AX-X*E"); //check convergence t1 =timer(); if (use_throttle) { //use throttle technique if ((i % 2) == 0) { //notice can not use G=R'*BR, because it is != R'*R, which is needed at this point CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, k, k, n, &one, R, n, R, n, &zero, G, k)); collect_sqrt_memcpy<IndexType_,ValueType_>(k,G,k,nrmR,s_alg); cudaMemcpyAsync(h_nrmR, &nrmR[k-1], sizeof(ValueType_), cudaMemcpyDeviceToHost, s_alg); cudaCheckError(); cudaEventRecord(event, s_alg); cudaCheckError(); } if (((i+1) % 2) == 0) { cudaEventSynchronize(event); cudaCheckError(); if (h_nrmR[0] < tol) { break; } } } else { //use naive approach for (j=0; j<k; j++) { CHECK_CUBLAS(cublasXnrm2(cublasHandle, n, &R[j*n], 1, &h_nrmR[j])); //printf("h_nrmR[%d]=%f \n", j,h_nrmR[j]); } if (h_nrmR[k-1] < tol) { break; } } t2 =timer(); t_custom+=t2-t1; //R=M\R preconditioning step t1 =timer(); L->prec_solve(k,one,R,eigVecs_dev); t2 =timer(); t_prec+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,3,R,n,"R=M\R"); //make residuals B orthogonal to X (I'm not sure this is needed) //R = R - X*(BX'*R); if (use_R_orthogonalization) { t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, k, k, n, &one, BX, n, R, n, &zero, G, k)); t2 =timer(); t_bdot+=t2-t1; t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k, &mone, X, n, G, k, &one, R, n)); t2 =timer(); t_gemm+=t2-t1; } //BX= B*X if (use_normalized_laplacian) { L->dm(k, one, R, zero, BR); } else { cudaMemcpyAsync(BR, R, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); } //G=R'*BR t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, k, k, n, &one, R, n, BR, n, &zero, G, k)); t2 =timer(); t_bdot+=t2-t1; 
//print_matrix<IndexType_,ValueType_,true>(k,k,G,k,"G=R'*BR"); //S = chol(G); t1 =timer(); //if (false /*use_magma*/) { // MAGMACHECK(magma_xpotrf(k, G, k, &minfo)); //} //else{ CHECK_CUSOLVER(cusolverXpotrf_bufferSize(cusolverHandle,k,G,k,&Lwork)); //Workspace was already over allocated earlier CHECK_CUSOLVER(cusolverXpotrf(cusolverHandle,k,G,k,Workspace,Lwork,(int *)&Workspace[Lwork])); // } t2 =timer(); t_potrf+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(k,k,G,k,"S=chol(G,lower_part_stored)"); //R = R/S (notice that in MATLAB S has L', therefore extra transpose (CUBLAS_OP_T) is required below) t1 =timer(); CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_RIGHT,CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,n,k,&one,G,k,R,n)); //BR=BR/S CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_RIGHT,CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,n,k,&one,G,k,BR,n)); t2 =timer(); t_trsm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,3, R,n,"R = R/S"); //print_matrix<IndexType_,ValueType_,true>(3,3,BR,n,"BR=BR/S"); //G=Y'*Q (where Q=B*Y) //std::cout<<"size : "<< sz<< std::endl; //print_matrix<IndexType_,ValueType_,true>(sz,sz,Y,sz,"Y"); //print_matrix<IndexType_,ValueType_,true>(sz,sz,Q,sz,"Q"); t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, sz, sz, n, &one, Y, n, Q, n, &zero, G, sz)); t2 =timer(); t_bdot+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(sz,sz,G,sz,"G=Y'*Q"); //check conditioning of the subspace restart strategy //WARNING: We need to compute condition number of matrix G in ||.||_2. //Normally to compute these condition number we would perform a singular value //decomposition and have kappa(G) = max_singular_value/min_singular_value of G. t1 =timer(); //if (use_magma) { // //Notice also that MAGMA does not have GPU interface to singular_value decomposition, // //but it does have one for the eigenvalue routine. 
We will take advantage of it: // //Since G is symmetric we can also say that singular_value(G) = sqrt(eigenvalue(A'*A)) = eigenvalue(A), // //therefore kappa(G) = max_eigenvalue_G/min_eigenvalue_G // //[W,E]=eig(H) // MAGMACHECK(magma_xsyevd_cond(sz, G, sz, h_E, h_wa, sz, h_work, lwork, h_iwork, liwork, &minfo)); // kappa = log10(h_E[sz-1]/h_E[0])+1; // //printf("cond=%f (%f/%f), %f\n",h_E[sz-1]/h_E[0],h_E[sz-1],h_E[0],log10(h_E[sz-1]/h_E[0])+1); // //print_matrix<IndexType_,ValueType_,false>(sz,1,h_E,sz,"h_E, sing_values(G)=eig(G) in cond(G)"); //} //else { if (sz > n*k) { //WARNING: using eigVecs_dev as a temporary space (for sz singular values) WARNING("lobpcg_simplified - temporary space insufficient (sz > n*k)"); return -1; } CHECK_CUSOLVER(cusolverXgesvd_bufferSize(cusolverHandle,sz,sz,G,sz,HU,sz,HVT,sz,&Lwork)); //Workspace was already over allocated earlier CHECK_CUSOLVER(cusolverXgesvd(cusolverHandle,sz,sz,G,sz,eigVecs_dev,HU,sz,HVT,sz,Workspace,Lwork,NULL,(int *)&Workspace[Lwork])); compute_cond<IndexType_,ValueType_>(sz,eigVecs_dev,s_alg); //condition number is eigVecs_dev[0] = eigVecs_dev[0]/eigVecs_dev[sz-1] cudaMemcpy(&kappa, eigVecs_dev, sizeof(ValueType_), cudaMemcpyDeviceToHost); cudaCheckError();//FIX LATER using throttle technique kappa = log10(kappa)+1.0; ///kappa =1; //} t2 =timer(); t_syevd+=t2-t1; //printf("cond=%f\n", kappa); //print_matrix<IndexType_,ValueType_,true>(sz,sz,G,sz,"G, should not have changed cond(G)"); //WARNING: will compute average (not mean, like MATLAB code) because it is easier to code start = max(0,i-10-((int)round(log(static_cast<float>(k))))); kappa_average = zero; for(j=start; j<=i; j++) { //printf("%f ",h_kappa_history[j]); kappa_average += h_kappa_history[j]; } //printf("\n"); kappa_average = kappa_average/(i-start+1); if (((kappa/kappa_average) > 2 && (kappa > 2)) || (kappa > max_kappa)) { //exclude P from Y=[X,R] sz = k2; //printf("restart=%d (%d, %d, %d, %d) (%f %f %f)\n",i,(int)round(log(k)),i-10-((int)round(log(k))),start,i-start+1,kappa,kappa_average,max_kappa); //recompute G=Y'*Q and corresponding condition number (excluding P) t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, sz, sz, n, &one, Y, n, Q, n, &zero, G, sz)); t2 =timer(); t_bdot+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(sz,sz,G,sz,"G=Y'*Y"); t1 =timer(); //if (use_magma) { // MAGMACHECK(magma_xsyevd_cond(sz, G, sz, h_E, h_wa, sz, h_work, lwork, h_iwork, liwork, &minfo)); // kappa = log10(h_E[sz-1]/h_E[0])+1; //} //else { if (sz > n*k) { //WARNING: using eigVecs_dev as a temporary space (for sz singular values) WARNING("lobpcg_simplified - temporary space insufficient (sz > n*k)"); return -1; } CHECK_CUSOLVER(cusolverXgesvd_bufferSize(cusolverHandle,sz,sz,G,sz,HU,sz,HVT,sz,&Lwork)); //Workspace was already over allocated earlier CHECK_CUSOLVER(cusolverXgesvd(cusolverHandle,sz,sz,G,sz,eigVecs_dev,HU,sz,HVT,sz,Workspace,Lwork,NULL,(int *)&Workspace[Lwork])); compute_cond<IndexType_,ValueType_>(sz,eigVecs_dev,s_alg); //condition number is eigVecs_dev[0] = eigVecs_dev[0]/eigVecs_dev[sz-1] cudaMemcpy(&kappa, eigVecs_dev, sizeof(ValueType_), cudaMemcpyDeviceToHost); cudaCheckError(); //FIX LATER using throttle technique kappa = log10(kappa)+1.0; ///kappa =1; //} t2 =timer(); t_syevd+=t2-t1; //printf("cond=%f\n", kappa); //print_matrix<IndexType_,ValueType_,false>(sz,1,h_E,sz,"h_E, sing_values(G)=eig(G) in cond(G)"); //print_matrix<IndexType_,ValueType_,true>(sz,sz,G,sz,"G, should not have changed cond(G)"); } h_kappa_history[i+1] = kappa; 
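// Recap of the restart test just applied: kappa is log10 of the 2-norm condition number of
// G = Y'*B*Y (plus 1), obtained from the singular values computed by cusolverXgesvd, and is
// compared with the running average of h_kappa_history over roughly the last 10+log(k)
// iterations (the history is seeded with -log10(eps)/2, about 7.8 in double precision).
// When kappa exceeds twice that average (and is above 2), or exceeds max_kappa (4 in single
// precision, 8 in double), the P block is dropped from Y=[X,R,P] (sz falls back to 2*k) and
// kappa is recomputed, keeping the Rayleigh-Ritz projection well conditioned.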
//WARNING: the computation of condition number destroys the //lower triangle of G (including diagonal), so it must be recomputed again. //recompute G=Y'*Q t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, sz, sz, n, &one, Y, n, Q, n, &zero, G, sz)); t2 =timer(); t_bdot+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(sz,sz,G,sz,"G=Y'*Q (recomputing)"); //AR = A*R t1 =timer(); L->mm(k, one, R, zero, AR); t2 =timer(); t_mm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,k,AR,n,"AR=A*R"); //H = Y'*Z t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, sz, sz, n, &one, Y, n, Z, n, &zero, H, sz)); t2 =timer(); t_bdot+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(sz,sz,H,sz,"H=Y'*A*Y"); //Approach 1: //S = chol(G); t1 =timer(); //if (false /*use_magma*/) { // MAGMACHECK(magma_xpotrf(sz, G, sz, &minfo)); //} //else{ CHECK_CUSOLVER(cusolverXpotrf_bufferSize(cusolverHandle,sz,G,sz,&Lwork)); //Workspace was over already over allocated earlier CHECK_CUSOLVER(cusolverXpotrf(cusolverHandle,sz,G,sz,Workspace,Lwork,(int *)&Workspace[Lwork])); //} t2 =timer(); t_potrf+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(sz,sz,G,sz,"S=chol(G,lower_part_stored)"); //H = S'\ H /S (notice that in MATLAB S has L', therefore extra transpose (CUBLAS_OP_T) is required below) t1 =timer(); CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_RIGHT,CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,sz,sz,&one,G,sz,H,sz)); CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_N,CUBLAS_DIAG_NON_UNIT,sz,sz,&one,G,sz,H,sz)); t2 =timer(); t_trsm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(sz,sz,H,sz,"H = S'\\ H /S"); //[W,E]=eig(S'\ H /S); t1 =timer(); //if (use_magma) { // MAGMACHECK(magma_xsyevd(sz, H, sz, h_E, h_wa, sz, h_work, lwork, h_iwork, liwork, &minfo)); // cudaMemcpy(E, h_E, k*sizeof(ValueType_), cudaMemcpyHostToDevice); cudaCheckError(); //only have k spaces in E, but h_E have sz eigs //} //else { if (sz > n*k) { //WARNING: using eigVecs_dev as a temporary space (for sz singular values) WARNING("lobpcg_simplified - temporary space insufficient (sz > n*k)"); return -1; } CHECK_CUSOLVER(cusolverXgesvd_bufferSize(cusolverHandle,sz,sz,H,sz,HU,sz,HVT,sz,&Lwork)); //Workspace was already over allocated earlier CHECK_CUSOLVER(cusolverXgesvd(cusolverHandle,sz,sz,H,sz,eigVecs_dev,HU,sz,HVT,sz,Workspace,Lwork,NULL,(int *)&Workspace[Lwork])); convert_to_ascending_order<IndexType_,ValueType_,true>(sz,H,sz,E,HU,sz,eigVecs_dev,s_alg); //} t2 =timer(); t_syevd+=t2-t1; //print_matrix<IndexType_,ValueType_,false>(sz,1,h_E,sz,"h_E, from [W,E]=eig(S'\\ H /S)"); //print_matrix<IndexType_,ValueType_, true>(k,1,E,k,"E, smallest k eigs from [W,E]=eig(S'\\ H /S)"); //print_matrix<IndexType_,ValueType_, true>(sz,sz,H,sz,"W, from [W,E]=eig(S'\\ H /S)"); //W=S\W (recover original eigvectors) t1 =timer(); CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,sz,sz,&one,G,sz,H,sz)); t2 =timer(); t_trsm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(sz,sz,H,sz,"W=S\\W"); //WARNING: using eigVecs_dev as a temporary space //X =Y*W(:,1:k); //notice can not use X for the result directly, because it is part of Y (and aliased by Y) t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, sz, &one, Y, n, H, sz, &zero, eigVecs_dev, n)); cudaMemcpyAsync(X, eigVecs_dev, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); //BX=Q*W(:,1:k); 
//notice can not use BX for the result directly, because it is part of Q (and aliased by Q) CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, sz, &one, Q, n, H, sz, &zero, eigVecs_dev, n)); cudaMemcpyAsync(BX, eigVecs_dev, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); //AX=Z*W(:,1:k); //notice can not use AX for the result directly, because it is part of Z (and aliased by Z) CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, sz, &one, Z, n, H, sz, &zero, eigVecs_dev, n)); cudaMemcpyAsync(AX, eigVecs_dev, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); t2 =timer(); t_gemm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,3, X,n,"X =Y*W(:,1:k)"); //print_matrix<IndexType_,ValueType_,true>(3,3,BX,n,"BX=Q*W(:,1:k)"); //print_matrix<IndexType_,ValueType_,true>(3,3,AX,n,"AX=Z*W(:,1:k)"); //update P t1 =timer(); if (sz == k2) { //P = R*W(k+1:2*k,1:k); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k, &one, R, n, &H[k], sz, &zero, P, n)); //BP=BR*W(k+1:2*k,1:k); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k, &one,BR, n, &H[k], sz, &zero,BP, n)); //AP=AR*W(k+1:2*k,1:k); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k, &one,AR, n, &H[k], sz, &zero,AP, n)); //print_matrix<IndexType_,ValueType_,true>(3,3, P,n,"P = R*W(k+1:2*k,1:k)"); //print_matrix<IndexType_,ValueType_,true>(3,3,BP,n,"BP=BR*W(k+1:2*k,1:k)"); //print_matrix<IndexType_,ValueType_,true>(3,3,AP,n,"AP=AR*W(k+1:2*k,1:k)"); } else { //(sz == k3) //P= R*W(k+1:2*k,1:k) + P*W(2*k+1:3*k,1:k); and recall that Y = [X,R,P] CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k2, &one, &Y[n*k], n, &H[k], sz, &zero, eigVecs_dev, n)); cudaMemcpyAsync(P, eigVecs_dev, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg);cudaCheckError(); //BP=BR*W(k+1:2*k,1:k) + BP*W(2*k+1:3*k,1:k); and recall that Q = [BX,BR,BP] CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k2, &one, &Q[n*k], n, &H[k], sz, &zero, eigVecs_dev, n)); cudaMemcpyAsync(BP, eigVecs_dev, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg);cudaCheckError(); //AP=AR*W(k+1:2*k,1:k) + AP*W(2*k+1:3*k,1:k); and recall that Z = [AX,AR,AP] CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_N,CUBLAS_OP_N, n, k, k2, &one, &Z[n*k], n, &H[k], sz, &zero, eigVecs_dev, n)); cudaMemcpyAsync(AP, eigVecs_dev, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg);cudaCheckError(); //print_matrix<IndexType_,ValueType_,true>(3,3, P,n,"P = R*W(k+1:2*k,1:k) + P*W(2*k+1:3*k,1:k)"); //print_matrix<IndexType_,ValueType_,true>(3,3,BP,n,"BP=BR*W(k+1:2*k,1:k) + BP*W(2*k+1:3*k,1:k)"); //print_matrix<IndexType_,ValueType_,true>(3,3,AP,n,"AP=AR*W(k+1:2*k,1:k) + AP*W(2*k+1:3*k,1:k)"); } t2 =timer(); t_gemm+=t2-t1; //orthonormalize P //G = P'*BP t1 =timer(); CHECK_CUBLAS(cublasXgemm(cublasHandle,CUBLAS_OP_T,CUBLAS_OP_N, k, k, n, &one, P, n, BP, n, &zero, G, k)); t2 =timer(); t_bdot+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(k,k,G,k,"G=P'*BP"); //S = chol(G); t1 =timer(); //if (false /*use_magma*/) { // MAGMACHECK(magma_xpotrf(k, G, k, &minfo)); //} //else{ CHECK_CUSOLVER(cusolverXpotrf_bufferSize(cusolverHandle,k,G,k,&Lwork)); //Workspace was already over allocated earlier CHECK_CUSOLVER(cusolverXpotrf(cusolverHandle,k,G,k,Workspace,Lwork,(int *)&Workspace[Lwork])); //} t2 =timer(); t_potrf+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(k,k,G,k,"S=chol(G,lower_part_stored)"); //P = P/S (notice that in 
MATLAB S has L', therefore extra transpose (CUBLAS_OP_T) is required below) t1 =timer(); CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_RIGHT,CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,n,k,&one,G,k,P,n)); //BP = BP/S CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_RIGHT,CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,n,k,&one,G,k,BP,n)); //AP = AP/S CHECK_CUBLAS(cublasXtrsm(cublasHandle,CUBLAS_SIDE_RIGHT,CUBLAS_FILL_MODE_LOWER,CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT,n,k,&one,G,k,AP,n)); t2 =timer(); t_trsm+=t2-t1; //print_matrix<IndexType_,ValueType_,true>(3,3, P,n,"P = P/S"); //print_matrix<IndexType_,ValueType_,true>(3,3,BP,n,"BP=BP/S"); //print_matrix<IndexType_,ValueType_,true>(3,3,AP,n,"AP=AP/S"); //copy AX into R (to satisfy assumption in the next iteration) cudaMemcpyAsync(R, AX, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg);cudaCheckError(); //reset sz for the next iteration sz=k3; //printf("--- %d ---\n",i); } t_end =timer(); t_total+=t_end-t_start; //WARNING: In the MATLAB code at this point X is made a section of A, //which I don't think is necessary, but something to keep in mind, //in case something goes wrong in the future. cudaMemcpyAsync(eigVecs_dev, X, n*k*sizeof(ValueType_), cudaMemcpyDeviceToDevice, s_alg); cudaCheckError(); //free temporary host memory cudaStreamSynchronize(s_alg); cudaCheckError(); //if (use_magma) { // if (h_E) free(h_E); // if (h_wa) free(h_wa); // if (h_work) free(h_work); // if (h_iwork) free(h_iwork); //} if(use_throttle) { cudaFreeHost(h_nrmR);cudaCheckError(); //pinned } else { if (h_nrmR) free(h_nrmR); } if (h_kappa_history) free(h_kappa_history); cudaEventDestroy(event);cudaCheckError(); if (s_alg) {cudaStreamDestroy(s_alg);cudaCheckError();} //revert CUBLAS and CUSOLVER/MAGMA streams CHECK_CUBLAS(cublasSetStream(cublasHandle, s_cublas)); //if (use_magma) { // CHECK_CUBLAS(magmablasSetKernelStream(s_magma)); //returns cublasStatus_t //} //else { CHECK_CUSOLVER(cusolverDnSetStream(cusolverHandle, s_cusolver)); //} //revert Laplacian/CUSPARSE streams L->setCUDAStream(s_cusparse); #ifdef COLLECT_TIME_STATISTICS //timing statistics printf("-------------------------\n"); printf("time eigsolver [total] %f\n",t_total); printf("time eigsolver [L->pr] %f\n",t_prec); printf("time eigsolver [potrf] %f\n",t_potrf); printf("time eigsolver [syevd] %f\n",t_syevd); printf("time eigsolver [trsm] %f\n",t_trsm); printf("time eigsolver [bdot] %f\n",t_bdot); printf("time eigsolver [gemm] %f\n",t_gemm); printf("time eigsolver [L->mm] %f\n",t_mm); printf("time eigsolver [custom]%f\n",t_custom); printf("time eigsolver [setup] %f\n",t_setup); printf("time eigsolver [other] %f\n",t_total-(t_prec+t_potrf+t_syevd+t_trsm+t_bdot+t_gemm+t_mm+t_custom+t_setup)); #endif return 0; } // ========================================================= // Explicit instantiation // ========================================================= template int lobpcg_simplified<int,float> (cublasHandle_t cublasHandle, cusolverDnHandle_t cusolverHandle, int n, int k, /*const*/ Matrix<int,float> * A, float * __restrict__ eigVecs_dev, float * __restrict__ eigVals_dev, int maxIter, float tol, float * __restrict__ work_dev, int &iter); template int lobpcg_simplified<int,double> (cublasHandle_t cublasHandle, cusolverDnHandle_t cusolverHandle, int n, int k, /*const*/ Matrix<int,double> * A, double * __restrict__ eigVecs_dev, double * __restrict__ eigVals_dev, int maxIter, double tol, double * __restrict__ work_dev, int &iter); } //#endif //enable/disable lobpcg
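// For reference, a minimal helper showing how large the caller-provided work_dev buffer must
// be, derived from the offsets used at the top of lobpcg_simplified (nine n-by-k block
// vectors, four (3k)-by-(3k) matrices G/H/HU/HVT, k residual norms, then the cuSOLVER
// workspace). The helper name and the "solver_slack" parameter are illustrative assumptions:
// the routine itself does not expose how much extra space cusolverXpotrf/cusolverXgesvd need,
// so the caller has to query or over-allocate that part.
static inline size_t lobpcg_min_work_size(size_t n, size_t k, size_t solver_slack)
{
  size_t k3 = 3 * k;
  return 9 * k * n       // Y=[X,R,P], Z=[AX,AR,AP], Q=[BX,BR,BP]
       + 4 * k3 * k3     // G, H, HU, HVT
       + k               // nrmR
       + solver_slack;   // region handed to the cuSOLVER potrf/gesvd calls (plus devInfo)
}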
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/bfs.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <iomanip> #include "bfs.hxx" #include <limits> #include "nvgraph_error.hxx" #include "bfs_kernels.cu" using namespace bfs_kernels; namespace nvgraph { enum BFS_ALGO_STATE { TOPDOWN, BOTTOMUP }; template<typename IndexType> NVGRAPH_ERROR Bfs<IndexType>::setup() { // Determinism flag, false by default deterministic = false; //Working data //Each vertex can be in the frontier at most once cudaMalloc(&frontier, n * sizeof(IndexType)); cudaCheckError() ; //We will update frontier during the execution //We need the orig to reset frontier, or cudaFree original_frontier = frontier; //size of bitmaps for vertices vertices_bmap_size = (n / (8 * sizeof(int)) + 1); //ith bit of visited_bmap is set <=> ith vertex is visited cudaMalloc(&visited_bmap, sizeof(int) * vertices_bmap_size); cudaCheckError() ; //ith bit of isolated_bmap is set <=> degree of ith vertex = 0 cudaMalloc(&isolated_bmap, sizeof(int) * vertices_bmap_size); cudaCheckError() ; //vertices_degree[i] = degree of vertex i cudaMalloc(&vertex_degree, sizeof(IndexType) * n); cudaCheckError() ; //Cub working data cub_exclusive_sum_alloc(n + 1, d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes); //We will need (n+1) ints buffer for two differents things (bottom up or top down) - sharing it since those uses are mutually exclusive cudaMalloc(&buffer_np1_1, (n + 1) * sizeof(IndexType)); cudaCheckError() ; cudaMalloc(&buffer_np1_2, (n + 1) * sizeof(IndexType)); cudaCheckError() ; //Using buffers : top down //frontier_vertex_degree[i] is the degree of vertex frontier[i] frontier_vertex_degree = buffer_np1_1; //exclusive sum of frontier_vertex_degree exclusive_sum_frontier_vertex_degree = buffer_np1_2; //Using buffers : bottom up //contains list of unvisited vertices unvisited_queue = buffer_np1_1; //size of the "last" unvisited queue : size_last_unvisited_queue //refers to the size of unvisited_queue //which may not be up to date (the queue may contains vertices that are now visited) //We may leave vertices unvisited after bottom up main kernels - storing them here left_unvisited_queue = buffer_np1_2; //We use buckets of edges (32 edges per bucket for now, see exact macro in bfs_kernels). 
frontier_vertex_degree_buckets_offsets[i] is the index k such as frontier[k] is the source of the first edge of the bucket //See top down kernels for more details cudaMalloc( &exclusive_sum_frontier_vertex_buckets_offsets, ((nnz / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2) * sizeof(IndexType)); cudaCheckError() ; //Init device-side counters //Those counters must be/can be reset at each bfs iteration //Keeping them adjacent in memory allow use call only one cudaMemset - launch latency is the current bottleneck cudaMalloc(&d_counters_pad, 4 * sizeof(IndexType)); cudaCheckError() ; d_new_frontier_cnt = &d_counters_pad[0]; d_mu = &d_counters_pad[1]; d_unvisited_cnt = &d_counters_pad[2]; d_left_unvisited_cnt = &d_counters_pad[3]; //Lets use this int* for the next 3 lines //Its dereferenced value is not initialized - so we dont care about what we put in it IndexType * d_nisolated = d_new_frontier_cnt; cudaMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream); cudaCheckError() ; //Computing isolated_bmap //Only dependent on graph - not source vertex - done once flag_isolated_vertices(n, isolated_bmap, row_offsets, vertex_degree, d_nisolated, stream); cudaMemcpyAsync(&nisolated, d_nisolated, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaCheckError() ; //We need nisolated to be ready to use cudaStreamSynchronize(stream); cudaCheckError() ; return NVGRAPH_OK; } template<typename IndexType> NVGRAPH_ERROR Bfs<IndexType>::configure( IndexType *_distances, IndexType *_predecessors, int *_edge_mask) { distances = _distances; predecessors = _predecessors; edge_mask = _edge_mask; useEdgeMask = (edge_mask != NULL); computeDistances = (distances != NULL); computePredecessors = (predecessors != NULL); //We need distances to use bottom up if (directed && !computeDistances) cudaMalloc(&distances, n * sizeof(IndexType)); cudaCheckError() ; return NVGRAPH_OK; } template<typename IndexType> NVGRAPH_ERROR Bfs<IndexType>::traverse(IndexType source_vertex) { //Init visited_bmap //If the graph is undirected, we not that //we will never discover isolated vertices (in degree = out degree = 0) //we avoid a lot of work by flagging them now //in g500 graphs they represent ~25% of total vertices //more than that for wiki and twitter graphs if (directed) { cudaMemsetAsync(visited_bmap, 0, vertices_bmap_size * sizeof(int), stream); } else { cudaMemcpyAsync( visited_bmap, isolated_bmap, vertices_bmap_size * sizeof(int), cudaMemcpyDeviceToDevice, stream); } cudaCheckError() ; //If needed, setting all vertices as undiscovered (inf distance) //We dont use computeDistances here //if the graph is undirected, we may need distances even if //computeDistances is false if (distances) fill_vec(distances, n, vec_t<IndexType>::max, stream); //If needed, setting all predecessors to non-existent (-1) if (computePredecessors) { cudaMemsetAsync(predecessors, -1, n * sizeof(IndexType), stream); cudaCheckError() ; } // //Initial frontier // frontier = original_frontier; if (distances) { cudaMemsetAsync(&distances[source_vertex], 0, sizeof(IndexType), stream); cudaCheckError() ; } //Setting source_vertex as visited //There may be bit already set on that bmap (isolated vertices) - if the graph is undirected int current_visited_bmap_source_vert = 0; if (!directed) { cudaMemcpyAsync(&current_visited_bmap_source_vert, &visited_bmap[source_vertex / INT_SIZE], sizeof(int), cudaMemcpyDeviceToHost); cudaCheckError() ; //We need current_visited_bmap_source_vert cudaStreamSynchronize(stream); cudaCheckError() ; //We could detect that 
source is isolated here } int m = (1 << (source_vertex % INT_SIZE)); //In that case, source is isolated, done now if (!directed && (m & current_visited_bmap_source_vert)) { //Init distances and predecessors are done, (cf Streamsync in previous if) cudaCheckError() ; return NVGRAPH_OK; } m |= current_visited_bmap_source_vert; cudaMemcpyAsync( &visited_bmap[source_vertex / INT_SIZE], &m, sizeof(int), cudaMemcpyHostToDevice, stream); cudaCheckError() ; //Adding source_vertex to init frontier cudaMemcpyAsync( &frontier[0], &source_vertex, sizeof(IndexType), cudaMemcpyHostToDevice, stream); cudaCheckError() ; //mf : edges in frontier //nf : vertices in frontier //mu : edges undiscovered //nu : nodes undiscovered //lvl : current frontier's depth IndexType mf, nf, mu, nu; bool growing; IndexType lvl = 1; //Frontier has one vertex nf = 1; //all edges are undiscovered (by def isolated vertices have 0 edges) mu = nnz; //all non isolated vertices are undiscovered (excepted source vertex, which is in frontier) //That number is wrong if source_vertex is also isolated - but it's not important nu = n - nisolated - nf; //Last frontier was 0, now it is 1 growing = true; IndexType size_last_left_unvisited_queue = n; //we just need value > 0 IndexType size_last_unvisited_queue = 0; //queue empty //Typical pre-top down workflow. set_frontier_degree + exclusive-scan set_frontier_degree(frontier_vertex_degree, frontier, vertex_degree, nf, stream); exclusive_sum( d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes, frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); cudaMemcpyAsync( &mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaCheckError() ; //We need mf cudaStreamSynchronize(stream); cudaCheckError() ; //At first we know we have to use top down BFS_ALGO_STATE algo_state = TOPDOWN; //useDistances : we check if a vertex is a parent using distances in bottom up - distances become working data //undirected g : need parents to be in children's neighbors bool can_use_bottom_up = !directed && distances; while (nf > 0) { //Each vertices can appear only once in the frontierer array - we know it will fit new_frontier = frontier + nf; IndexType old_nf = nf; resetDevicePointers(); if (can_use_bottom_up) { //Choosing algo //Finite machine described in http://parlab.eecs.berkeley.edu/sites/all/parlab/files/main.pdf switch (algo_state) { case TOPDOWN: if (mf > mu / alpha) algo_state = BOTTOMUP; break; case BOTTOMUP: if (!growing && nf < n / beta) { //We need to prepare the switch back to top down //We couldnt keep track of mu during bottom up - because we dont know what mf is. Computing mu here count_unvisited_edges( unvisited_queue, size_last_unvisited_queue, visited_bmap, vertex_degree, d_mu, stream); //Typical pre-top down workflow. 
set_frontier_degree + exclusive-scan set_frontier_degree(frontier_vertex_degree, frontier, vertex_degree, nf, stream); exclusive_sum( d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes, frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); cudaMemcpyAsync( &mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaCheckError() ; cudaMemcpyAsync(&mu, d_mu, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaCheckError() ; //We will need mf and mu cudaStreamSynchronize(stream); cudaCheckError() ; algo_state = TOPDOWN; } break; } } //Executing algo switch (algo_state) { case TOPDOWN: compute_bucket_offsets( exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets, nf, mf, stream); frontier_expand( row_offsets, col_indices, frontier, nf, mf, lvl, new_frontier, d_new_frontier_cnt, exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets, visited_bmap, distances, predecessors, edge_mask, isolated_bmap, directed, stream, deterministic); mu -= mf; cudaMemcpyAsync( &nf, d_new_frontier_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaCheckError(); //We need nf cudaStreamSynchronize(stream); cudaCheckError(); if (nf) { //Typical pre-top down workflow. set_frontier_degree + exclusive-scan set_frontier_degree(frontier_vertex_degree, new_frontier, vertex_degree, nf, stream); exclusive_sum( d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes, frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); cudaMemcpyAsync( &mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaCheckError() ; //We need mf cudaStreamSynchronize(stream); cudaCheckError() ; } break; case BOTTOMUP: fill_unvisited_queue(visited_bmap, vertices_bmap_size, n, unvisited_queue, d_unvisited_cnt, stream, deterministic); size_last_unvisited_queue = nu; bottom_up_main(unvisited_queue, size_last_unvisited_queue, left_unvisited_queue, d_left_unvisited_cnt, visited_bmap, row_offsets, col_indices, lvl, new_frontier, d_new_frontier_cnt, distances, predecessors, edge_mask, stream, deterministic); //The number of vertices left unvisited decreases //If it wasnt necessary last time, it wont be this time if (size_last_left_unvisited_queue) { cudaMemcpyAsync( &size_last_left_unvisited_queue, d_left_unvisited_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaCheckError() ; //We need last_left_unvisited_size cudaStreamSynchronize(stream); cudaCheckError() ; bottom_up_large( left_unvisited_queue, size_last_left_unvisited_queue, visited_bmap, row_offsets, col_indices, lvl, new_frontier, d_new_frontier_cnt, distances, predecessors, edge_mask, stream, deterministic); } cudaMemcpyAsync( &nf, d_new_frontier_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaCheckError() ; //We will need nf cudaStreamSynchronize(stream); cudaCheckError() ; break; } //Updating undiscovered edges count nu -= nf; //Using new frontier frontier = new_frontier; growing = (nf > old_nf); ++lvl; } cudaCheckError() ; return NVGRAPH_OK; } //Just used for benchmarks now template<typename IndexType> NVGRAPH_ERROR Bfs<IndexType>::traverse(IndexType *source_vertices, IndexType nsources) { for (IndexType i = 0; i < nsources; ++i) traverse(source_vertices[i]); return NVGRAPH_OK; } template<typename IndexType> void Bfs<IndexType>::resetDevicePointers() { cudaMemsetAsync(d_counters_pad, 0, 4 * sizeof(IndexType), stream); cudaCheckError() ; } 
template<typename IndexType> void Bfs<IndexType>::clean() { cudaCheckError() ; //the vectors have a destructor that takes care of cleaning cudaFree(original_frontier); cudaFree(visited_bmap); cudaFree(isolated_bmap); cudaFree(vertex_degree); cudaFree(d_cub_exclusive_sum_storage); cudaFree(buffer_np1_1); cudaFree(buffer_np1_2); cudaFree(exclusive_sum_frontier_vertex_buckets_offsets); cudaFree(d_counters_pad); //In that case, distances is working data allocated in configure() if (directed && !computeDistances) cudaFree(distances); cudaCheckError() ; } template class Bfs<int> ; } // end namespace nvgraph
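// For reference, the two rules that drive the top-down / bottom-up switch in traverse()
// above, isolated as small helpers. The default values 15 and 18 are the classic alpha/beta
// of Beamer's direction-optimizing BFS and are only assumptions here; the actual alpha and
// beta members used by this class are defined in bfs.hxx.
static inline bool bfs_switch_to_bottom_up(long long mf, long long mu, long long alpha = 15)
{
  // frontier edges dominate the edges still pointing to undiscovered vertices
  return mf > mu / alpha;
}
static inline bool bfs_switch_to_top_down(long long nf, long long n, bool growing, long long beta = 18)
{
  // frontier stopped growing and is now a small fraction of all vertices
  return !growing && nf < n / beta;
}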
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/bfs2d.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bfs2d.hxx" #include "bfs2d_kernels.cuh" #include "debug_help.h" namespace nvgraph { using namespace bfs_kernels; template<typename GlobalType, typename LocalType, typename ValueType> NVGRAPH_ERROR Bfs2d<GlobalType, LocalType, ValueType>::setup() { // Setup the frontier and visited bitmaps int32_t offset = M->getMatrixDecompositionDescription().getOffset(); int32_t bitmap_n = (offset + 31) / 32; const MatrixDecompositionDescription<GlobalType, LocalType>* descr; descr = &(M->getMatrixDecompositionDescription()); frontier_bmap = new VertexData2D<GlobalType, LocalType, int32_t>(descr, bitmap_n); visited_bmap = new VertexData2D<GlobalType, LocalType, int32_t>(descr, bitmap_n); // Setup frontier and frontierSize frontier = new VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>(descr); trim_frontier = new VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>(descr); frontierSize = new VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>(descr, 1); frontierSize_h.resize(descr->getNumBlocks()); frontierDegree_h.resize(descr->getNumBlocks()); degreeFlags = new VertexData2D_Unbuffered<GlobalType, LocalType, int8_t>(descr); // Setup the 2d distances and predecessors distances = new VertexData2D<GlobalType, LocalType, int32_t>(descr); predecessors = new VertexData2D<GlobalType, LocalType, GlobalType>(descr); // Setup degree exclusive sum and cub storage space LocalType n_exSum = offset + 1; size_t temp_bytes = getCubExclusiveSumStorageSize(n_exSum); size_t temp_bytes_compact = getCubSelectFlaggedStorageSize(n_exSum - 1); if (temp_bytes_compact > temp_bytes) temp_bytes = temp_bytes_compact; exSumStorage = new VertexData2D_Unbuffered<GlobalType, LocalType, int8_t>(descr, temp_bytes); exSumDegree = new VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>(descr, offset + 1); // Setup bucketOffsets. Size is based on nnz, so we find the largest nnz over all blocks and use that. int32_t numBlocks = descr->getNumBlocks(); size_t blockNnz = 0; for (int32_t i = 0; i < numBlocks; i++) { MultiValuedCsrGraph<LocalType, ValueType>* block = M->getBlockMatrix(i); blockNnz = max(block->get_num_edges(), blockNnz); } size_t bucketAllocSize = ((blockNnz / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2); bucketOffsets = new VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>(descr, bucketAllocSize); // Size bucketOffsets based on blockNnz return NVGRAPH_OK; } template<typename GlobalType, typename LocalType, typename ValueType> NVGRAPH_ERROR Bfs2d<GlobalType, LocalType, ValueType>::configure(GlobalType *_distances, GlobalType *_predecessors) { // Set the output locations. 
distances_out = _distances; predecessors_out = _predecessors; return NVGRAPH_OK; } template<typename GlobalType, typename LocalType, typename ValueType> void Bfs2d<GlobalType, LocalType, ValueType>::clean() { // Delete allocated data: if (distances) delete distances; if (predecessors) delete predecessors; if (frontier_bmap) delete frontier_bmap; if (visited_bmap) delete visited_bmap; if (frontier) delete frontier; if (trim_frontier) delete trim_frontier; if (frontierSize) delete frontierSize; if (exSumDegree) delete exSumDegree; if (exSumStorage) delete exSumStorage; if (bucketOffsets) delete bucketOffsets; if (degreeFlags) delete degreeFlags; } template<typename GlobalType, typename LocalType, typename ValueType> NVGRAPH_ERROR Bfs2d<GlobalType, LocalType, ValueType>::traverse(GlobalType source_vertex) { // Setup and get references for things const MatrixDecompositionDescription<GlobalType, LocalType>& description = M->getMatrixDecompositionDescription(); const std::vector<int32_t>& deviceAssignments = description.getDeviceAssignments(); const std::vector<cudaStream_t>& blockStreams = description.getBlockStreams(); int32_t numBlocks = description.getNumBlocks(); LocalType offset = description.getOffset(); int32_t current_device; cudaGetDevice(&current_device); // Initialize the frontier bitmap with the source vertex set frontier_bmap->fillElements(0); LocalType blockRow = source_vertex / offset; LocalType blockOffset = source_vertex % offset; LocalType intId = blockOffset / 32; LocalType bitOffset = blockOffset % 32; int32_t bmapElement = 1 << bitOffset; int32_t bId = description.getBlockId(blockRow, blockRow); int32_t* copyTo = frontier_bmap->getCurrent(bId) + intId; cudaMemcpy(copyTo, &bmapElement, sizeof(int32_t), cudaMemcpyDefault); frontier_bmap->rowScatter(); // Initialize frontierSizes to zero frontierSize->fillElements(0); frontierSize->rowScatter(); // Initialize the visited bitmap with the source vertex set frontier_bmap->copyTo(visited_bmap); visited_bmap->columnScatter(); // Initialize the distances and predecessors distances->fillElements((LocalType) -1); distances->setElement(source_vertex, (LocalType) 0); distances->columnScatter(); predecessors->fillElements((GlobalType) -1); predecessors->columnScatter(); // Setup initial frontier from bitmap frontier for (int i = 0; i < numBlocks; i++) { cudaStream_t stream = blockStreams[i]; int32_t device = deviceAssignments[i]; cudaSetDevice(device); convert_bitmap_to_queue(frontier_bmap->getCurrent(i), frontier_bmap->getN(), offset, frontier->get(i), frontierSize->get(i), stream); cudaMemcpyAsync(&frontierSize_h[i], frontierSize->get(i), sizeof(LocalType), cudaMemcpyDefault, stream); } description.syncAllStreams(); // Main iteration loop int32_t globalSources = 1; LocalType level = 1; while (globalSources > 0) { // std::cout << "Starting with level " << level << "\n"; // Remove frontier nodes with locally zero degree for (int i = 0; i < numBlocks; i++) { // Checking that there is work to be done for this block if (frontierSize_h[i] > 0) { // Write out the degree of each frontier node into exSumDegree degreeIterator<LocalType> degreeIt(M->getBlockMatrix(i)->get_raw_row_offsets()); cudaStream_t stream = blockStreams[i]; cudaSetDevice(deviceAssignments[i]); set_degree_flags( degreeFlags->get(i), frontier->get(i), degreeIt, frontierSize_h[i], stream); // set_frontier_degree(exSumDegree->get(i), // frontier->get(i), // degreeIt, // frontierSize_h[i], // stream); // // cudaStreamSynchronize(stream); // std::cout << "Block " << i << " before 
compaction.\n"; // debug::printDeviceVector(frontier->get(i), frontierSize_h[i], "Frontier"); // debug::printDeviceVector(exSumDegree->get(i), frontierSize_h[i], "Frontier Degree"); // Use degreeIterator as flags to compact the frontier cudaSetDevice(deviceAssignments[i]); size_t numBytes = exSumStorage->getN(); cub::DeviceSelect::Flagged(exSumStorage->get(i), numBytes, frontier->get(i), degreeFlags->get(i), trim_frontier->get(i), frontierSize->get(i), frontierSize_h[i], stream); cudaMemcpyAsync(&frontierSize_h[i], frontierSize->get(i), sizeof(LocalType), cudaMemcpyDefault, stream); } } description.syncAllStreams(); // Setup load balancing for main kernel call for (int i = 0; i < numBlocks; i++) { // Checking that there is work to be done for this block: if (frontierSize_h[i] > 0) { // Write out the degree of each frontier node into exSumDegree degreeIterator<LocalType> degreeIt(M->getBlockMatrix(i)->get_raw_row_offsets()); cudaStream_t stream = blockStreams[i]; cudaSetDevice(deviceAssignments[i]); set_frontier_degree(exSumDegree->get(i), trim_frontier->get(i), degreeIt, frontierSize_h[i], stream); // cudaStreamSynchronize(stream); // std::cout << "Block " << i << " after compaction.\n"; // debug::printDeviceVector(trim_frontier->get(i), frontierSize_h[i], "Frontier"); // debug::printDeviceVector(exSumDegree->get(i), frontierSize_h[i], "Frontier Degree"); // Get the exclusive sum of the frontier degrees, store in exSumDegree size_t numBytes = exSumStorage->getN(); cub::DeviceScan::ExclusiveSum(exSumStorage->get(i), numBytes, exSumDegree->get(i), exSumDegree->get(i), frontierSize_h[i] + 1, stream); cudaMemcpyAsync(&frontierDegree_h[i], exSumDegree->get(i) + frontierSize_h[i], sizeof(LocalType), cudaMemcpyDefault, stream); } } description.syncAllStreams(); // for (int i = 0; i < numBlocks; i++) { // std::cout << "Block " << i << " frontierNodes " << frontierSize_h[i] // << " frontierDegree " << frontierDegree_h[i] << "\n"; // } for (int i = 0; i < numBlocks; i++) { // Checking that there is work to be done for this block: if (frontierSize_h[i] > 0) { cudaStream_t stream = blockStreams[i]; cudaSetDevice(deviceAssignments[i]); compute_bucket_offsets(exSumDegree->get(i), bucketOffsets->get(i), frontierSize_h[i], frontierDegree_h[i], stream); } } // Call main kernel to get new frontier frontier_bmap->fillElements(0); frontier_bmap->rowScatter(); for (int i = 0; i < numBlocks; i++) { // Checking that there is work to be done for this block: if (frontierDegree_h[i] > 0) { cudaSetDevice(deviceAssignments[i]); frontier_expand(M->getBlockMatrix(i)->get_raw_row_offsets(), M->getBlockMatrix(i)->get_raw_column_indices(), trim_frontier->get(i), frontierSize_h[i], frontierDegree_h[i], level, frontier_bmap->getCurrent(i), exSumDegree->get(i), bucketOffsets->get(i), visited_bmap->getCurrent(i), distances->getCurrent(i), predecessors->getCurrent(i), blockStreams[i]); // cudaStreamSynchronize(blockStreams[i]); // int bitsSet = // thrust::reduce(thrust::device, // thrust::make_transform_iterator(frontier_bmap->getCurrent(i), // popCount()), // thrust::make_transform_iterator(frontier_bmap->getCurrent(i) // + frontier_bmap->getN(), // popCount())); // std::cout << "Block " << i << " Level " << level << " has " << bitsSet << " bits set\n"; } } description.syncAllStreams(); // Update and propogate new frontier and visited bitmaps frontier_bmap->template columnReduce<BitwiseOr>(); frontier_bmap->rowScatter(); visited_bmap->template columnReduce<BitwiseOr>(); visited_bmap->columnScatter(); // Convert bitmap 
frontier to list frontier and update globalSources frontierSize->fillElements(0); frontierSize->rowScatter(); for (int i = 0; i < numBlocks; i++) { cudaStream_t stream = blockStreams[i]; int32_t device = deviceAssignments[i]; cudaSetDevice(device); convert_bitmap_to_queue(frontier_bmap->getCurrent(i), frontier_bmap->getN(), offset, frontier->get(i), frontierSize->get(i), stream); cudaMemcpyAsync(&frontierSize_h[i], frontierSize->get(i), sizeof(LocalType), cudaMemcpyDefault, stream); } description.syncAllStreams(); GlobalType blockRows = description.getBlockRows(); globalSources = 0; for (int i = 0; i < blockRows; i++) { int32_t bId = description.getBlockId(i, i); globalSources += frontierSize_h[bId]; } // std::cout << "Finished with level " << level << " frontiers:\n"; // for (int i = 0; i < numBlocks; i++) // std::cout << "\tBlock " << i << " : " << frontierSize_h[i] << "\n"; // Increment level level++; } // Globalize the predecessors by row for (int i = 0; i < numBlocks; i++) { cudaStream_t stream = blockStreams[i]; int32_t device = deviceAssignments[i]; cudaSetDevice(device); int32_t rowId = description.getBlockRow(i); GlobalType globalOffset = rowId * description.getOffset(); globalize_ids(predecessors->getCurrent(i), globalOffset, (GlobalType) predecessors->getN(), stream); } description.syncAllStreams(); // Propogate predecessors and distances predecessors->template columnReduce<predMerge>(); distances->template columnReduce<predMerge>(); // Copy out predecessors and distances to user provided locations LocalType* temp = (LocalType*) malloc(distances->getN() * sizeof(LocalType)); int32_t writeOffset = 0; int32_t numRows = description.getNumRows(); int32_t blockRows = description.getBlockRows(); for (int i = 0; i < blockRows; i++) { // Copy out the data for the block on the diagonal int32_t bId = description.getBlockId(i, i); int32_t n = predecessors->getN(); cudaMemcpy(temp, predecessors->getCurrent(bId), n * sizeof(LocalType), cudaMemcpyDefault); for (int j = 0; j < n; j++) { if (writeOffset + j < numRows) predecessors_out[writeOffset + j] = temp[j]; } cudaMemcpy(temp, distances->getCurrent(bId), n * sizeof(LocalType), cudaMemcpyDefault); for (int j = 0; j < n; j++) { if (writeOffset + j < numRows) distances_out[writeOffset + j] = temp[j]; } writeOffset += n; } return NVGRAPH_OK; } template<typename GlobalType, typename LocalType, typename ValueType> NVGRAPH_ERROR Bfs2d<GlobalType, LocalType, ValueType>::traverse(GlobalType *source_vertices, int32_t nsources) { for (int32_t i = 0; i < nsources; i++) { traverse(source_vertices[i]); } return NVGRAPH_OK; } template class Bfs2d<int, int, int> ; }
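// For reference, the vertex-to-bitmap addressing used when traverse() above seeds the source
// vertex: "offset" is the number of vertices per block row (getOffset()), the bit lands in
// the diagonal block (blockRow, blockRow), and each 32-bit bitmap word covers 32 vertices.
// The struct and function names are illustrative only.
struct Bitmap2dCoord { int32_t blockRow; int32_t word; int32_t mask; };
static inline Bitmap2dCoord to_bitmap_coord(int64_t vertex, int64_t offset)
{
  Bitmap2dCoord c;
  c.blockRow = static_cast<int32_t>(vertex / offset);  // owning block row
  int64_t local = vertex % offset;                     // local id inside that block row
  c.word = static_cast<int32_t>(local / 32);           // which bitmap word
  c.mask = 1 << static_cast<int32_t>(local % 32);      // bit within the word
  return c;
}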
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/jaccard_gpu.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Jaccard similarity edge weights // Author: Alexandre Fender afender@nvidia.com and Maxim Naumov. #include "graph_utils.cuh" #include "jaccard_gpu.cuh" namespace nvlouvain { //#define CUDA_MAX_BLOCKS 65535 //#define CUDA_MAX_KERNEL_THREADS 256 //kernel will launch at most 256 threads per block //#define DEFAULT_MASK 0xffffffff // Volume of neighbors (*weight_s) template<bool weighted, typename T> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_row_sum(int n, int e, int *csrPtr, int *csrInd, T *v, T *work) { int row,start,end,length; T sum; for (row=threadIdx.y+blockIdx.y*blockDim.y; row<n; row+=gridDim.y*blockDim.y) { start = csrPtr[row]; end = csrPtr[row+1]; length= end-start; //compute row sums if (weighted) { sum = parallel_prefix_sum(length, csrInd + start, v); if (threadIdx.x == 0) work[row] = sum; } else { work[row] = (T)length; } } } // Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s) template<bool weighted, typename T> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_is(int n, int e, int *csrPtr, int *csrInd, T *v, T *work, T *weight_i, T *weight_s) { int i,j,row,col,Ni,Nj; int ref,cur,ref_col,cur_col,match; T ref_val; for (row=threadIdx.z+blockIdx.z*blockDim.z; row<n; row+=gridDim.z*blockDim.z) { for (j=csrPtr[row]+threadIdx.y+blockIdx.y*blockDim.y; j<csrPtr[row+1]; j+=gridDim.y*blockDim.y) { col = csrInd[j]; //find which row has the fewest elements (and call it the reference row) Ni = csrPtr[row+1] - csrPtr[row]; Nj = csrPtr[col+1] - csrPtr[col]; ref= (Ni < Nj) ? row : col; cur= (Ni < Nj) ?
col : row; //compute new sum weights weight_s[j] = work[row] + work[col]; //compute new intersection weights //search for the element with the same column index in the reference row for (i=csrPtr[ref]+threadIdx.x+blockIdx.x*blockDim.x; i<csrPtr[ref+1]; i+=gridDim.x*blockDim.x) { match =-1; ref_col = csrInd[i]; if (weighted) { ref_val = v[ref_col]; } else { ref_val = 1.0; } //binary search (column indices are sorted within each row) int left = csrPtr[cur]; int right= csrPtr[cur+1]-1; while(left <= right){ int middle = (left+right)>>1; cur_col= csrInd[middle]; if (cur_col > ref_col) { right=middle-1; } else if (cur_col < ref_col) { left=middle+1; } else { match = middle; break; } } //if the element with the same column index in the reference row has been found if (match != -1){ atomicAdd(&weight_i[j],ref_val); } } } } } //Jaccard weights (*weight) template<bool weighted, typename T> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_jw(int n, int e, int *csrPtr, int *csrInd, T *csrVal, T *v, T gamma, T *weight_i, T *weight_s, T *weight_j) { int j; T Wi,Ws,Wu; for (j=threadIdx.x+blockIdx.x*blockDim.x; j<e; j+=gridDim.x*blockDim.x) { Wi = weight_i[j]; Ws = weight_s[j]; Wu = Ws - Wi; weight_j[j] = (gamma*csrVal[j])* (Wi/Wu); } } template<bool weighted, typename T> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_jw(int n, int e, int *csrPtr, int *csrInd, T *v, T *weight_i, T *weight_s, T *weight_j) { int j; T Wi,Ws,Wu; for (j=threadIdx.x+blockIdx.x*blockDim.x; j<e; j+=gridDim.x*blockDim.x) { Wi = weight_i[j]; Ws = weight_s[j]; Wu = Ws - Wi; weight_j[j] = (Wi/Wu); } } template <bool weighted, typename T> int jaccard(int n, int e, int *csrPtr, int *csrInd, T * csrVal, T *v, T *work, T gamma, T *weight_i, T *weight_s, T *weight_j) { dim3 nthreads, nblocks; int y=4; //setup launch configuration nthreads.x = 32/y; nthreads.y = y; nthreads.z = 1; nblocks.x = 1; nblocks.y = min((n + nthreads.y - 1)/nthreads.y,CUDA_MAX_BLOCKS); nblocks.z = 1; //launch kernel jaccard_row_sum<weighted,T><<<nblocks,nthreads>>>(n,e,csrPtr,csrInd,v,work); fill(e,weight_i,(T)0.0); //setup launch configuration nthreads.x = 32/y; nthreads.y = y; nthreads.z = 8; nblocks.x = 1; nblocks.y = 1; nblocks.z = min((n + nthreads.z - 1)/nthreads.z,CUDA_MAX_BLOCKS); //1; //launch kernel jaccard_is<weighted,T><<<nblocks,nthreads>>>(n,e,csrPtr,csrInd,v,work,weight_i,weight_s); //setup launch configuration nthreads.x = min(e,CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((e + nthreads.x - 1)/nthreads.x,CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; //launch kernel if (csrVal != NULL) jaccard_jw<weighted,T><<<nblocks,nthreads>>>(n,e,csrPtr,csrInd,csrVal,v,gamma,weight_i,weight_s,weight_j); else jaccard_jw<weighted,T><<<nblocks,nthreads>>>(n,e,csrPtr,csrInd,v,weight_i,weight_s,weight_j); return 0; } //template int jaccard<true, half> ( int n, int e, int *csrPtr, int *csrInd, half *csrVal, half *v, half *work, half gamma, half *weight_i, half *weight_s, half *weight_j); //template int jaccard<false, half> ( int n, int e, int *csrPtr, int *csrInd, half *csrVal, half *v, half *work, half gamma, half *weight_i, half *weight_s, half *weight_j); template int jaccard<true, float> ( int n, int e, int *csrPtr, int *csrInd, float *csrVal, float *v, float *work, float gamma, float *weight_i, float *weight_s, float *weight_j); template int jaccard<false, float> ( int n, int e, int *csrPtr, int *csrInd, float *csrVal, float *v, float *work, float gamma, float *weight_i, float *weight_s, float 
*weight_j); template int jaccard<true, double> (int n, int e, int *csrPtr, int *csrInd, double *csrVal, double *v, double *work, double gamma, double *weight_i, double *weight_s, double *weight_j); template int jaccard<false, double> (int n, int e, int *csrPtr, int *csrInd, double *csrVal, double *v, double *work, double gamma, double *weight_i, double *weight_s, double *weight_j); } //namespace nvlouvain
0
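The kernels above derive, for each edge (row, col), the intersection volume weight_i and the summed neighborhood volume weight_s, and then the Jaccard weight weight_i / (weight_s - weight_i), optionally scaled by gamma*csrVal in the weighted overload. The following host-side reference is a hypothetical sketch for the unweighted case only (it is not part of the library) and can be used to sanity-check the GPU output on small CSR graphs with sorted column indices:

// Hypothetical CPU reference: for each edge (r, c), Wi = |N(r) ∩ N(c)|,
// Ws = |N(r)| + |N(c)|, and the Jaccard weight is Wi / (Ws - Wi).
#include <cstdio>
#include <vector>

static std::vector<double> jaccard_cpu_reference(int n,
                                                 const std::vector<int>& csrPtr,
                                                 const std::vector<int>& csrInd) {
  std::vector<double> weight(csrInd.size(), 0.0);
  for (int r = 0; r < n; ++r) {
    for (int j = csrPtr[r]; j < csrPtr[r + 1]; ++j) {
      const int c = csrInd[j];
      // Column indices are sorted within each row (the GPU binary search relies on
      // this too), so the intersection can be counted with a linear merge.
      double Wi = 0.0;
      int a = csrPtr[r], b = csrPtr[c];
      while (a < csrPtr[r + 1] && b < csrPtr[c + 1]) {
        if (csrInd[a] < csrInd[b]) ++a;
        else if (csrInd[a] > csrInd[b]) ++b;
        else { Wi += 1.0; ++a; ++b; }
      }
      const double Ws = (csrPtr[r + 1] - csrPtr[r]) + (csrPtr[c + 1] - csrPtr[c]);
      weight[j] = Wi / (Ws - Wi);  // Ws - Wi = |N(r) ∪ N(c)|, at least 1 for a stored edge
    }
  }
  return weight;
}

int main() {
  // Undirected triangle 0-1-2 stored as a symmetric CSR adjacency.
  std::vector<int> csrPtr = {0, 2, 4, 6};
  std::vector<int> csrInd = {1, 2, 0, 2, 0, 1};
  std::vector<double> w = jaccard_cpu_reference(3, csrPtr, csrInd);
  for (size_t e = 0; e < w.size(); ++e)
    std::printf("edge %zu: %f\n", e, w[e]);  // each triangle edge gets Wi=1, Ws=4, weight=1/3
  return 0;
}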
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/nvgraph_cublas.cpp
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <nvgraph_cublas.hxx> namespace nvgraph { cublasHandle_t Cublas::m_handle = 0; namespace { cublasStatus_t cublas_axpy(cublasHandle_t handle, int n, const float* alpha, const float* x, int incx, float* y, int incy) { return cublasSaxpy(handle, n, alpha, x, incx, y, incy); } cublasStatus_t cublas_axpy(cublasHandle_t handle, int n, const double* alpha, const double* x, int incx, double* y, int incy) { return cublasDaxpy(handle, n, alpha, x, incx, y, incy); } cublasStatus_t cublas_copy(cublasHandle_t handle, int n, const float* x, int incx, float* y, int incy) { return cublasScopy(handle, n, x, incx, y, incy); } cublasStatus_t cublas_copy(cublasHandle_t handle, int n, const double* x, int incx, double* y, int incy) { return cublasDcopy(handle, n, x, incx, y, incy); } cublasStatus_t cublas_dot(cublasHandle_t handle, int n, const float* x, int incx, const float* y, int incy, float* result) { return cublasSdot(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_dot(cublasHandle_t handle, int n, const double* x, int incx, const double* y, int incy, double* result) { return cublasDdot(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_trsv_v2(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const float *A, int lda, float *x, int incx) { return cublasStrsv (handle, uplo, trans, diag, n, A, lda, x, incx); } cublasStatus_t cublas_trsv_v2(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const double *A, int lda, double *x, int incx) { return cublasDtrsv (handle, uplo, trans, diag, n, A, lda, x, incx); } cublasStatus_t cublas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc) { return cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } cublasStatus_t cublas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc) { return cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } cublasStatus_t cublas_gemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const float *beta, float* y, int incy) { return cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } cublasStatus_t cublas_gemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const double *alpha, const double *A, int lda, const double *x, int incx, const double *beta, double* y, int incy) { return cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } cublasStatus_t cublas_ger(cublasHandle_t handle, int m, int n, const float* alpha, const 
float* x, int incx, const float* y, int incy, float* A, int lda) { return cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_ger(cublasHandle_t handle, int m, int n, const double* alpha, const double* x, int incx, const double* y, int incy, double *A, int lda) { return cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_nrm2(cublasHandle_t handle, int n, const float *x, int incx, float *result) { return cublasSnrm2(handle, n, x, incx, result); } cublasStatus_t cublas_nrm2(cublasHandle_t handle, int n, const double *x, int incx, double *result) { return cublasDnrm2(handle, n, x, incx, result); } cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const float* alpha, float* x, int incx) { return cublasSscal(handle, n, alpha, x, incx); } cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const double* alpha, double* x, int incx) { return cublasDscal(handle, n, alpha, x, incx); } cublasStatus_t cublas_geam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const float * alpha, const float * A, int lda, const float * beta, const float * B, int ldb, float * C, int ldc) { return cublasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } cublasStatus_t cublas_geam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const double * alpha, const double * A, int lda, const double * beta, const double * B, int ldb, double * C, int ldc) { return cublasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } } // anonymous namespace. void Cublas::set_pointer_mode_device() { cublasHandle_t handle = Cublas::get_handle(); cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE); } void Cublas::set_pointer_mode_host() { cublasHandle_t handle = Cublas::get_handle(); cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST); } template <typename T> void Cublas::axpy(int n, T alpha, const T* x, int incx, T* y, int incy) { cublasHandle_t handle = Cublas::get_handle(); CHECK_CUBLAS(cublas_axpy(handle, n, &alpha, x, incx, y, incy)); } template <typename T> void Cublas::copy(int n, const T* x, int incx, T* y, int incy) { cublasHandle_t handle = Cublas::get_handle(); CHECK_CUBLAS(cublas_copy(handle, n, x, incx, y, incy)); } template <typename T> void Cublas::dot(int n, const T* x, int incx, const T* y, int incy, T* result) { cublasHandle_t handle = Cublas::get_handle(); CHECK_CUBLAS(cublas_dot(handle, n, x, incx, y, incy, result)); } template <typename T> T Cublas::nrm2(int n, const T* x, int incx) { Cublas::get_handle(); T result; Cublas::nrm2(n, x, incx, &result); return result; } template <typename T> void Cublas::nrm2(int n, const T* x, int incx, T* result) { cublasHandle_t handle = Cublas::get_handle(); CHECK_CUBLAS(cublas_nrm2(handle, n, x, incx, result)); } template <typename T> void Cublas::scal(int n, T alpha, T* x, int incx) { Cublas::scal(n, &alpha, x, incx); } template <typename T> void Cublas::scal(int n, T* alpha, T* x, int incx) { cublasHandle_t handle = Cublas::get_handle(); CHECK_CUBLAS(cublas_scal(handle, n, alpha, x, incx)); } template <typename T> void Cublas::gemv(bool transposed, int m, int n, const T* alpha, const T* A, int lda, const T* x, int incx, const T* beta, T* y, int incy) { cublasHandle_t handle = Cublas::get_handle(); cublasOperation_t trans = transposed ? 
CUBLAS_OP_T : CUBLAS_OP_N; CHECK_CUBLAS(cublas_gemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy)); } template <typename T> void Cublas::gemv_ext(bool transposed, const int m, const int n, const T* alpha, const T* A, const int lda, const T* x, const int incx, const T* beta, T* y, const int incy, const int offsetx, const int offsety, const int offseta) { cublasHandle_t handle = Cublas::get_handle(); cublasOperation_t trans = transposed ? CUBLAS_OP_T : CUBLAS_OP_N; CHECK_CUBLAS(cublas_gemv(handle, trans, m, n, alpha, A+offseta, lda, x+offsetx, incx, beta, y+offsety, incy)); } template <typename T> void Cublas::trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const T *A, int lda, T *x, int incx, int offseta) { cublasHandle_t handle = Cublas::get_handle(); CHECK_CUBLAS( cublas_trsv_v2(handle, uplo, trans, diag, n, A+offseta, lda, x, incx)); } template <typename T> void Cublas::ger(int m, int n, const T* alpha, const T* x, int incx, const T* y, int incy, T* A, int lda) { cublasHandle_t handle = Cublas::get_handle(); CHECK_CUBLAS(cublas_ger(handle, m, n, alpha, x, incx, y, incy, A, lda)); } template <typename T> void Cublas::gemm(bool transa, bool transb, int m, int n, int k, const T * alpha, const T * A, int lda, const T * B, int ldb, const T * beta, T * C, int ldc) { cublasHandle_t handle = Cublas::get_handle(); cublasOperation_t cublasTransA = transa ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t cublasTransB = transb ? CUBLAS_OP_T : CUBLAS_OP_N; CHECK_CUBLAS(cublas_gemm(handle, cublasTransA, cublasTransB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)); } template <typename T> void Cublas::geam(bool transa, bool transb, int m, int n, const T * alpha, const T * A, int lda, const T * beta, const T * B, int ldb, T * C, int ldc) { cublasHandle_t handle = Cublas::get_handle(); cublasOperation_t cublasTransA = transa ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t cublasTransB = transb ? 
CUBLAS_OP_T : CUBLAS_OP_N; CHECK_CUBLAS(cublas_geam(handle, cublasTransA, cublasTransB, m, n, alpha, A, lda, beta, B, ldb, C, ldc)); } template void Cublas::axpy(int n, float alpha, const float* x, int incx, float* y, int incy); template void Cublas::axpy(int n, double alpha, const double* x, int incx, double* y, int incy); template void Cublas::copy(int n, const float* x, int incx, float* y, int incy); template void Cublas::copy(int n, const double* x, int incx, double* y, int incy); template void Cublas::dot(int n, const float* x, int incx, const float* y, int incy, float* result); template void Cublas::dot(int n, const double* x, int incx, const double* y, int incy, double* result); template void Cublas::gemv(bool transposed, int m, int n, const float* alpha, const float* A, int lda, const float* x, int incx, const float* beta, float* y, int incy); template void Cublas::gemv(bool transposed, int m, int n, const double* alpha, const double* A, int lda, const double* x, int incx, const double* beta, double* y, int incy); template void Cublas::ger(int m, int n, const float* alpha, const float* x, int incx, const float* y, int incy, float* A, int lda); template void Cublas::ger(int m, int n, const double* alpha, const double* x, int incx, const double* y, int incy, double* A, int lda); template void Cublas::gemv_ext(bool transposed, const int m, const int n, const float* alpha, const float* A, const int lda, const float* x, const int incx, const float* beta, float* y, const int incy, const int offsetx, const int offsety, const int offseta); template void Cublas::gemv_ext(bool transposed, const int m, const int n, const double* alpha, const double* A, const int lda, const double* x, const int incx, const double* beta, double* y, const int incy, const int offsetx, const int offsety, const int offseta); template void Cublas::trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const float *A, int lda, float *x, int incx, int offseta); template void Cublas::trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const double *A, int lda, double *x, int incx, int offseta); template double Cublas::nrm2(int n, const double* x, int incx); template float Cublas::nrm2(int n, const float* x, int incx); template void Cublas::scal(int n, float alpha, float* x, int incx); template void Cublas::scal(int n, double alpha, double* x, int incx); template void Cublas::gemm(bool transa, bool transb, int m, int n, int k, const float * alpha, const float * A, int lda, const float * B, int ldb, const float * beta, float * C, int ldc); template void Cublas::gemm(bool transa, bool transb, int m, int n, int k, const double * alpha, const double * A, int lda, const double * B, int ldb, const double * beta, double * C, int ldc); template void Cublas::geam(bool transa, bool transb, int m, int n, const float * alpha, const float * A, int lda, const float * beta, const float * B, int ldb, float * C, int ldc); template void Cublas::geam(bool transa, bool transb, int m, int n, const double * alpha, const double * A, int lda, const double * beta, const double * B, int ldb, double * C, int ldc); } // end namespace nvgraph
0
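These wrappers pick the float or double cuBLAS entry point purely by C++ overload resolution, so solver code stays type-generic. A minimal usage sketch follows; it assumes the nvgraph headers are on the include path, that Cublas::get_handle() creates the handle lazily, and the buffer names are illustrative only:

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <nvgraph_cublas.hxx>

int main() {
  const int n = 4;
  std::vector<float> hx(n, 1.0f), hy(n, 2.0f);
  float *dx = nullptr, *dy = nullptr;
  cudaMalloc(&dx, n * sizeof(float));
  cudaMalloc(&dy, n * sizeof(float));
  cudaMemcpy(dx, hx.data(), n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dy, hy.data(), n * sizeof(float), cudaMemcpyHostToDevice);

  // y <- 3*x + y; the float overload dispatches to cublasSaxpy.
  nvgraph::Cublas::axpy(n, 3.0f, dx, 1, dy, 1);

  // Euclidean norm of y, returned to the host (each entry is 5, so the norm is 10).
  float nrm = nvgraph::Cublas::nrm2(n, dy, 1);
  std::printf("||y||_2 = %f\n", nrm);

  cudaFree(dx);
  cudaFree(dy);
  return 0;
}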
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/kmeans.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#ifdef NVGRAPH_PARTITION //#ifdef DEBUG #include "kmeans.hxx" #include <stdio.h> #include <time.h> #include <math.h> #include <cuda.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/binary_search.h> #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/random.h> #include <thrust/gather.h> #include "nvgraph_vector.hxx" #include "nvgraph_cublas.hxx" #include "atomics.hxx" #include "sm_utils.h" #include "debug_macros.h" using namespace nvgraph; // ========================================================= // Useful macros // ========================================================= #define BLOCK_SIZE 1024 #define WARP_SIZE 32 #define BSIZE_DIV_WSIZE (BLOCK_SIZE/WARP_SIZE) // Get index of matrix entry #define IDX(i,j,lda) ((i)+(j)*(lda)) namespace { // ========================================================= // CUDA kernels // ========================================================= /// Compute distances between observation vectors and centroids /** Block dimensions should be (warpSize, 1, * blockSize/warpSize). Ideally, the grid is large enough so there * are d threads in the x-direction, k threads in the y-direction, * and n threads in the z-direction. * * @param n Number of observation vectors. * @param d Dimension of observation vectors. * @param k Number of clusters. * @param obs (Input, d*n entries) Observation matrix. Matrix is * stored column-major and each column is an observation * vector. Matrix dimensions are d x n. * @param centroids (Input, d*k entries) Centroid matrix. Matrix is * stored column-major and each column is a centroid. Matrix * dimensions are d x k. * @param dists (Output, n*k entries) Distance matrix. Matrix is * stored column-major and the (i,j)-entry is the square of the * Euclidean distance between the ith observation vector and jth * centroid. Matrix dimensions are n x k. Entries must be * initialized to zero. */ template <typename IndexType_, typename ValueType_> static __global__ void computeDistances(IndexType_ n, IndexType_ d, IndexType_ k, const ValueType_ * __restrict__ obs, const ValueType_ * __restrict__ centroids, ValueType_ * __restrict__ dists) { // Loop index IndexType_ i; // Block indices IndexType_ bidx; // Global indices IndexType_ gidx, gidy, gidz; // Private memory ValueType_ centroid_private, dist_private; // Global x-index indicates index of vector entry bidx = blockIdx.x; while(bidx*blockDim.x < d) { gidx = threadIdx.x + bidx*blockDim.x; // Global y-index indicates centroid gidy = threadIdx.y + blockIdx.y*blockDim.y; while(gidy < k) { // Load centroid coordinate from global memory centroid_private = (gidx < d) ? centroids[IDX(gidx,gidy,d)] : 0; // Global z-index indicates observation vector gidz = threadIdx.z + blockIdx.z*blockDim.z; while(gidz < n) { // Load observation vector coordinate from global memory dist_private = (gidx < d) ? 
obs[IDX(gidx,gidz,d)] : 0; // Compute contribution of current entry to distance dist_private = centroid_private - dist_private; dist_private = dist_private*dist_private; // Perform reduction on warp for(i=WARP_SIZE/2; i>0; i/=2) dist_private += utils::shfl_down(dist_private, i, 2*i); // Write result to global memory if(threadIdx.x == 0) atomicFPAdd(dists+IDX(gidz,gidy,n), dist_private); // Move to another observation vector gidz += blockDim.z*gridDim.z; } // Move to another centroid gidy += blockDim.y*gridDim.y; } // Move to another vector entry bidx += gridDim.x; } } /// Find closest centroid to observation vectors /** Block and grid dimensions should be 1-dimensional. Ideally the * grid is large enough so there are n threads. * * @param n Number of observation vectors. * @param k Number of clusters. * @param centroids (Input, d*k entries) Centroid matrix. Matrix is * stored column-major and each column is a centroid. Matrix * dimensions are d x k. * @param dists (Input/output, n*k entries) Distance matrix. Matrix * is stored column-major and the (i,j)-entry is the square of * the Euclidean distance between the ith observation vector and * jth centroid. Matrix dimensions are n x k. On exit, the first * n entries give the square of the Euclidean distance between * observation vectors and closest centroids. * @param codes (Output, n entries) Cluster assignments. * @param clusterSizes (Output, k entries) Number of points in each * cluster. Entries must be initialized to zero. */ template <typename IndexType_, typename ValueType_> static __global__ void minDistances(IndexType_ n, IndexType_ k, ValueType_ * __restrict__ dists, IndexType_ * __restrict__ codes, IndexType_ * __restrict__ clusterSizes) { // Loop index IndexType_ i, j; // Current matrix entry ValueType_ dist_curr; // Smallest entry in row ValueType_ dist_min; IndexType_ code_min; // Each row in observation matrix is processed by a thread i = threadIdx.x + blockIdx.x*blockDim.x; while(i<n) { // Find minimum entry in row code_min = 0; dist_min = dists[IDX(i,0,n)]; for(j=1; j<k; ++j) { dist_curr = dists[IDX(i,j,n)]; code_min = (dist_curr<dist_min) ? j : code_min; dist_min = (dist_curr<dist_min) ? dist_curr : dist_min; } // Transfer result to global memory dists[i] = dist_min; codes[i] = code_min; // Increment cluster sizes atomicAdd(clusterSizes+code_min, 1); // Move to another row i += blockDim.x*gridDim.x; } } /// Check if newly computed distances are smaller than old distances /** Block and grid dimensions should be 1-dimensional. Ideally the * grid is large enough so there are n threads. * * @param n Number of observation vectors. * @param dists_old (Input/output, n entries) Distances between * observation vectors and closest centroids. On exit, entries * are replaced by entries in 'dists_new' if the corresponding * observation vectors are closest to the new centroid. * @param dists_new (Input, n entries) Distance between observation * vectors and new centroid. * @param codes_old (Input/output, n entries) Cluster * assignments. On exit, entries are replaced with 'code_new' if * the corresponding observation vectors are closest to the new * centroid. * @param code_new Index associated with new centroid. 
*/ template <typename IndexType_, typename ValueType_> static __global__ void minDistances2(IndexType_ n, ValueType_ * __restrict__ dists_old, const ValueType_ * __restrict__ dists_new, IndexType_ * __restrict__ codes_old, IndexType_ code_new) { // Loop index IndexType_ i; // Distances ValueType_ dist_old_private; ValueType_ dist_new_private; // Each row is processed by a thread i = threadIdx.x + blockIdx.x*blockDim.x; while(i<n) { // Get old and new distances dist_old_private = dists_old[i]; dist_new_private = dists_new[i]; // Update if new distance is smaller than old distance if(dist_new_private < dist_old_private) { dists_old[i] = dist_new_private; codes_old[i] = code_new; } // Move to another row i += blockDim.x*gridDim.x; } } /// Compute size of k-means clusters /** Block and grid dimensions should be 1-dimensional. Ideally the * grid is large enough so there are n threads. * * @param n Number of observation vectors. * @param k Number of clusters. * @param codes (Input, n entries) Cluster assignments. * @param clusterSizes (Output, k entries) Number of points in each * cluster. Entries must be initialized to zero. */ template <typename IndexType_> static __global__ void computeClusterSizes(IndexType_ n, IndexType_ k, const IndexType_ * __restrict__ codes, IndexType_ * __restrict__ clusterSizes) { IndexType_ i = threadIdx.x + blockIdx.x*blockDim.x; while(i<n) { atomicAdd(clusterSizes+codes[i], 1); i += blockDim.x*gridDim.x; } } /// Divide rows of centroid matrix by cluster sizes /** Divides the ith column of the sum matrix by the size of the ith * cluster. If the sum matrix has been initialized so that the ith * row is the sum of all observation vectors in the ith cluster, * this kernel produces cluster centroids. The grid and block * dimensions should be 2-dimensional. Ideally the grid is large * enough so there are d threads in the x-direction and k threads * in the y-direction. * * @param d Dimension of observation vectors. * @param k Number of clusters. * @param clusterSizes (Input, k entries) Number of points in each * cluster. * @param centroids (Input/output, d*k entries) Sum matrix. Matrix * is stored column-major and matrix dimensions are d x k. The * ith column is the sum of all observation vectors in the ith * cluster. On exit, the matrix is the centroid matrix (each * column is the mean position of a cluster). */ template <typename IndexType_, typename ValueType_> static __global__ void divideCentroids(IndexType_ d, IndexType_ k, const IndexType_ * __restrict__ clusterSizes, ValueType_ * __restrict__ centroids) { // Global indices IndexType_ gidx, gidy; // Current cluster size IndexType_ clusterSize_private; // Observation vector is determined by global y-index gidy = threadIdx.y + blockIdx.y*blockDim.y; while(gidy < k) { // Get cluster size from global memory clusterSize_private = clusterSizes[gidy]; // Add vector entries to centroid matrix // Vector entris are determined by global x-index gidx = threadIdx.x + blockIdx.x*blockDim.x; while(gidx < d) { centroids[IDX(gidx,gidy,d)] /= clusterSize_private; gidx += blockDim.x*gridDim.x; } // Move to another centroid gidy += blockDim.y*gridDim.y; } } // ========================================================= // Helper functions // ========================================================= /// Randomly choose new centroids /** Centroid is randomly chosen with k-means++ algorithm. * * @param n Number of observation vectors. * @param d Dimension of observation vectors. * @param k Number of clusters. 
* @param rand Random number drawn uniformly from [0,1). * @param obs (Input, device memory, d*n entries) Observation * matrix. Matrix is stored column-major and each column is an * observation vector. Matrix dimensions are d x n. * @param dists (Input, device memory, 2*n entries) Workspace. The * first n entries should be the distance between observation * vectors and the closest centroid. * @param centroid (Output, device memory, d entries) Centroid * coordinates. * @return Zero if successful. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> static int chooseNewCentroid(IndexType_ n, IndexType_ d, IndexType_ k, ValueType_ rand, const ValueType_ * __restrict__ obs, ValueType_ * __restrict__ dists, ValueType_ * __restrict__ centroid) { using namespace thrust; // Cumulative sum of distances ValueType_ * distsCumSum = dists + n; // Residual sum of squares ValueType_ distsSum; // Observation vector that is chosen as new centroid IndexType_ obsIndex; // Compute cumulative sum of distances inclusive_scan(device_pointer_cast(dists), device_pointer_cast(dists+n), device_pointer_cast(distsCumSum)); cudaCheckError(); CHECK_CUDA(cudaMemcpy(&distsSum, distsCumSum+n-1, sizeof(ValueType_), cudaMemcpyDeviceToHost)); // Randomly choose observation vector // Probabilities are proportional to square of distance to closest // centroid (see k-means++ algorithm) obsIndex = (lower_bound(device_pointer_cast(distsCumSum), device_pointer_cast(distsCumSum+n), distsSum*rand) - device_pointer_cast(distsCumSum)); cudaCheckError(); obsIndex = max(obsIndex, 0); obsIndex = min(obsIndex, n-1); // Record new centroid position CHECK_CUDA(cudaMemcpyAsync(centroid, obs+IDX(0,obsIndex,d), d*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); return 0; } /// Choose initial cluster centroids for k-means algorithm /** Centroids are randomly chosen with k-means++ algorithm * * @param n Number of observation vectors. * @param d Dimension of observation vectors. * @param k Number of clusters. * @param obs (Input, device memory, d*n entries) Observation * matrix. Matrix is stored column-major and each column is an * observation vector. Matrix dimensions are d x n. * @param centroids (Output, device memory, d*k entries) Centroid * matrix. Matrix is stored column-major and each column is a * centroid. Matrix dimensions are d x k. * @param codes (Output, device memory, n entries) Cluster * assignments. * @param clusterSizes (Output, device memory, k entries) Number of * points in each cluster. * @param dists (Output, device memory, 2*n entries) Workspace. On * exit, the first n entries give the square of the Euclidean * distance between observation vectors and the closest centroid. * @return Zero if successful. Otherwise non-zero.
*/ template <typename IndexType_, typename ValueType_> static int initializeCentroids(IndexType_ n, IndexType_ d, IndexType_ k, const ValueType_ * __restrict__ obs, ValueType_ * __restrict__ centroids, IndexType_ * __restrict__ codes, IndexType_ * __restrict__ clusterSizes, ValueType_ * __restrict__ dists) { // ------------------------------------------------------- // Variable declarations // ------------------------------------------------------- // Loop index IndexType_ i; // CUDA grid dimensions dim3 blockDim_warp, gridDim_warp, gridDim_block; // Random number generator thrust::default_random_engine rng(123456); thrust::uniform_real_distribution<ValueType_> uniformDist(0,1); // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Initialize grid dimensions blockDim_warp.x = WARP_SIZE; blockDim_warp.y = 1; blockDim_warp.z = BSIZE_DIV_WSIZE; gridDim_warp.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535); gridDim_warp.y = 1; gridDim_warp.z = min((n+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535); gridDim_block.x = min((n+BLOCK_SIZE-1)/BLOCK_SIZE, 65535); gridDim_block.y = 1; gridDim_block.z = 1; // Assign observation vectors to code 0 CHECK_CUDA(cudaMemsetAsync(codes, 0, n*sizeof(IndexType_))); // Choose first centroid thrust::fill(thrust::device_pointer_cast(dists), thrust::device_pointer_cast(dists+n), 1); cudaCheckError(); if(chooseNewCentroid(n, d, k, uniformDist(rng), obs, dists, centroids)) WARNING("error in k-means++ (could not pick centroid)"); // Compute distances from first centroid CHECK_CUDA(cudaMemsetAsync(dists, 0, n*sizeof(ValueType_))); computeDistances <<< gridDim_warp, blockDim_warp >>> (n, d, 1, obs, centroids, dists); cudaCheckError() // Choose remaining centroids for(i=1; i<k; ++i) { // Choose ith centroid if(chooseNewCentroid(n, d, k, uniformDist(rng),obs, dists, centroids+IDX(0,i,d))) WARNING("error in k-means++ (could not pick centroid)"); // Compute distances from ith centroid CHECK_CUDA(cudaMemsetAsync(dists+n, 0, n*sizeof(ValueType_))); computeDistances <<< gridDim_warp, blockDim_warp >>> (n, d, 1, obs, centroids+IDX(0,i,d), dists+n); cudaCheckError(); // Recompute minimum distances minDistances2 <<< gridDim_block, BLOCK_SIZE >>> (n, dists, dists+n, codes, i); cudaCheckError(); } // Compute cluster sizes CHECK_CUDA(cudaMemsetAsync(clusterSizes, 0, k*sizeof(IndexType_))); computeClusterSizes <<< gridDim_block, BLOCK_SIZE >>> (n, k, codes, clusterSizes); cudaCheckError(); return 0; } /// Find cluster centroids closest to observation vectors /** Distance is measured with Euclidean norm. * * @param n Number of observation vectors. * @param d Dimension of observation vectors. * @param k Number of clusters. * @param obs (Input, device memory, d*n entries) Observation * matrix. Matrix is stored column-major and each column is an * observation vector. Matrix dimensions are d x n. * @param centroids (Input, device memory, d*k entries) Centroid * matrix. Matrix is stored column-major and each column is a * centroid. Matrix dimensions are d x k. * @param dists (Output, device memory, n*k entries) Workspace. On * exit, the first n entries give the square of the Euclidean * distance between observation vectors and the closest centroid. * @param codes (Output, device memory, n entries) Cluster * assignments. * @param clusterSizes (Output, device memory, k entries) Number of * points in each cluster. * @param residual_host (Output, host memory, 1 entry) Residual sum * of squares of assignment. 
* @return Zero if successful. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> static int assignCentroids(IndexType_ n, IndexType_ d, IndexType_ k, const ValueType_ * __restrict__ obs, const ValueType_ * __restrict__ centroids, ValueType_ * __restrict__ dists, IndexType_ * __restrict__ codes, IndexType_ * __restrict__ clusterSizes, ValueType_ * residual_host) { // CUDA grid dimensions dim3 blockDim, gridDim; // Compute distance between centroids and observation vectors CHECK_CUDA(cudaMemsetAsync(dists, 0, n*k*sizeof(ValueType_))); blockDim.x = WARP_SIZE; blockDim.y = 1; blockDim.z = BLOCK_SIZE/WARP_SIZE; gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535); gridDim.y = min(k, 65535); gridDim.z = min((n+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535); computeDistances <<< gridDim, blockDim >>> (n, d, k, obs, centroids, dists); cudaCheckError(); // Find centroid closest to each observation vector CHECK_CUDA(cudaMemsetAsync(clusterSizes,0,k*sizeof(IndexType_))); blockDim.x = BLOCK_SIZE; blockDim.y = 1; blockDim.z = 1; gridDim.x = min((n+BLOCK_SIZE-1)/BLOCK_SIZE, 65535); gridDim.y = 1; gridDim.z = 1; minDistances <<< gridDim, blockDim >>> (n, k, dists, codes, clusterSizes); cudaCheckError(); // Compute residual sum of squares *residual_host = thrust::reduce(thrust::device_pointer_cast(dists), thrust::device_pointer_cast(dists+n)); return 0; } /// Update cluster centroids for k-means algorithm /** All clusters are assumed to be non-empty. * * @param n Number of observation vectors. * @param d Dimension of observation vectors. * @param k Number of clusters. * @param obs (Input, device memory, d*n entries) Observation * matrix. Matrix is stored column-major and each column is an * observation vector. Matrix dimensions are d x n. * @param codes (Input, device memory, n entries) Cluster * assignments. * @param clusterSizes (Input, device memory, k entries) Number of * points in each cluster. * @param centroids (Output, device memory, d*k entries) Centroid * matrix. Matrix is stored column-major and each column is a * centroid. Matrix dimensions are d x k. * @param work (Output, device memory, n*d entries) Workspace. * @param work_int (Output, device memory, 2*d*n entries) * Workspace. * @return Zero if successful. Otherwise non-zero. 
*/ template <typename IndexType_, typename ValueType_> static int updateCentroids(IndexType_ n, IndexType_ d, IndexType_ k, const ValueType_ * __restrict__ obs, const IndexType_ * __restrict__ codes, const IndexType_ * __restrict__ clusterSizes, ValueType_ * __restrict__ centroids, ValueType_ * __restrict__ work, IndexType_ * __restrict__ work_int) { using namespace thrust; // ------------------------------------------------------- // Variable declarations // ------------------------------------------------------- // Useful constants const ValueType_ one = 1; const ValueType_ zero = 0; // CUDA grid dimensions dim3 blockDim, gridDim; // Device memory device_ptr<ValueType_> obs_copy(work); device_ptr<IndexType_> codes_copy(work_int); device_ptr<IndexType_> rows(work_int+d*n); // Take transpose of observation matrix Cublas::geam(true, false, n, d, &one, obs, d, &zero, (ValueType_*) NULL, n, raw_pointer_cast(obs_copy), n); // Cluster assigned to each observation matrix entry sequence(rows, rows+d*n); cudaCheckError(); transform(rows, rows+d*n, make_constant_iterator<IndexType_>(n), rows, modulus<IndexType_>()); cudaCheckError(); gather(rows, rows+d*n, device_pointer_cast(codes), codes_copy); cudaCheckError(); // Row associated with each observation matrix entry sequence(rows, rows+d*n); cudaCheckError(); transform(rows, rows+d*n, make_constant_iterator<IndexType_>(n), rows, divides<IndexType_>()); cudaCheckError(); // Sort and reduce to add observation vectors in same cluster stable_sort_by_key(codes_copy, codes_copy+d*n, make_zip_iterator(make_tuple(obs_copy, rows))); cudaCheckError(); reduce_by_key(rows, rows+d*n, obs_copy, codes_copy, // Output to codes_copy is ignored device_pointer_cast(centroids)); cudaCheckError(); // Divide sums by cluster size to get centroid matrix blockDim.x = WARP_SIZE; blockDim.y = BLOCK_SIZE/WARP_SIZE; blockDim.z = 1; gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535); gridDim.y = min((k+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535); gridDim.z = 1; divideCentroids <<< gridDim, blockDim >>> (d, k, clusterSizes, centroids); cudaCheckError(); return 0; } } namespace nvgraph { // ========================================================= // k-means algorithm // ========================================================= /// Find clusters with k-means algorithm /** Initial centroids are chosen with k-means++ algorithm. Empty * clusters are reinitialized by choosing new centroids with * k-means++ algorithm. * * @param n Number of observation vectors. * @param d Dimension of observation vectors. * @param k Number of clusters. * @param tol Tolerance for convergence. k-means stops when the * change in residual divided by n is less than tol. * @param maxiter Maximum number of k-means iterations. * @param obs (Input, device memory, d*n entries) Observation * matrix. Matrix is stored column-major and each column is an * observation vector. Matrix dimensions are d x n. * @param codes (Output, device memory, n entries) Cluster * assignments. * @param clusterSizes (Output, device memory, k entries) Number of * points in each cluster. * @param centroids (Output, device memory, d*k entries) Centroid * matrix. Matrix is stored column-major and each column is a * centroid. Matrix dimensions are d x k. * @param work (Output, device memory, n*max(k,d) entries) * Workspace. * @param work_int (Output, device memory, 2*d*n entries) * Workspace. 
* @param residual_host (Output, host memory, 1 entry) Residual sum * of squares (sum of squares of distances between observation * vectors and centroids). * @param iters_host (Output, host memory, 1 entry) Number of * k-means iterations. * @return NVGRAPH error flag. */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR kmeans(IndexType_ n, IndexType_ d, IndexType_ k, ValueType_ tol, IndexType_ maxiter, const ValueType_ * __restrict__ obs, IndexType_ * __restrict__ codes, IndexType_ * __restrict__ clusterSizes, ValueType_ * __restrict__ centroids, ValueType_ * __restrict__ work, IndexType_ * __restrict__ work_int, ValueType_ * residual_host, IndexType_ * iters_host) { // ------------------------------------------------------- // Variable declarations // ------------------------------------------------------- // Current iteration IndexType_ iter; // Residual sum of squares at previous iteration ValueType_ residualPrev = 0; // Random number generator thrust::default_random_engine rng(123456); thrust::uniform_real_distribution<ValueType_> uniformDist(0,1); // ------------------------------------------------------- // Initialization // ------------------------------------------------------- // Check that parameters are valid if(n < 1) { WARNING("invalid parameter (n<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(d < 1) { WARNING("invalid parameter (d<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(k < 1) { WARNING("invalid parameter (k<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxiter < 0) { WARNING("invalid parameter (maxiter<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Trivial cases if(k == 1) { CHECK_CUDA(cudaMemsetAsync(codes, 0, n*sizeof(IndexType_))); CHECK_CUDA(cudaMemcpyAsync(clusterSizes, &n, sizeof(IndexType_), cudaMemcpyHostToDevice)); if(updateCentroids(n, d, k, obs, codes, clusterSizes, centroids, work, work_int)) WARNING("could not compute k-means centroids"); dim3 blockDim, gridDim; blockDim.x = WARP_SIZE; blockDim.y = 1; blockDim.z = BLOCK_SIZE/WARP_SIZE; gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535); gridDim.y = 1; gridDim.z = min((n+BLOCK_SIZE/WARP_SIZE-1)/(BLOCK_SIZE/WARP_SIZE), 65535); CHECK_CUDA(cudaMemsetAsync(work, 0, n*k*sizeof(ValueType_))); computeDistances <<< gridDim, blockDim >>> (n, d, 1, obs, centroids, work); cudaCheckError(); *residual_host = thrust::reduce(thrust::device_pointer_cast(work), thrust::device_pointer_cast(work+n)); cudaCheckError(); return NVGRAPH_OK; } if(n <= k) { thrust::sequence(thrust::device_pointer_cast(codes), thrust::device_pointer_cast(codes+n)); cudaCheckError(); thrust::fill_n(thrust::device_pointer_cast(clusterSizes), n, 1); cudaCheckError(); if(n < k) CHECK_CUDA(cudaMemsetAsync(clusterSizes+n, 0, (k-n)*sizeof(IndexType_))); CHECK_CUDA(cudaMemcpyAsync(centroids, obs, d*n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); *residual_host = 0; return NVGRAPH_OK; } // Initialize cuBLAS Cublas::set_pointer_mode_host(); // ------------------------------------------------------- // k-means++ algorithm // ------------------------------------------------------- // Choose initial cluster centroids if(initializeCentroids(n, d, k, obs, centroids, codes, clusterSizes, work)) WARNING("could not initialize k-means centroids"); // Apply k-means iteration until convergence for(iter=0; iter<maxiter; ++iter) { // Update cluster centroids if(updateCentroids(n, d, k, obs, codes, clusterSizes, centroids, work, work_int)) WARNING("could not update 
k-means centroids"); // Determine centroid closest to each observation residualPrev = *residual_host; if(assignCentroids(n, d, k, obs, centroids, work, codes, clusterSizes, residual_host)) WARNING("could not assign observation vectors to k-means clusters"); // Reinitialize empty clusters with new centroids IndexType_ emptyCentroid = (thrust::find(thrust::device_pointer_cast(clusterSizes), thrust::device_pointer_cast(clusterSizes+k), 0) - thrust::device_pointer_cast(clusterSizes)); while(emptyCentroid < k) { if(chooseNewCentroid(n, d, k, uniformDist(rng), obs, work, centroids+IDX(0,emptyCentroid,d))) WARNING("could not replace empty centroid"); if(assignCentroids(n, d, k, obs, centroids, work, codes, clusterSizes, residual_host)) WARNING("could not assign observation vectors to k-means clusters"); emptyCentroid = (thrust::find(thrust::device_pointer_cast(clusterSizes), thrust::device_pointer_cast(clusterSizes+k), 0) - thrust::device_pointer_cast(clusterSizes)); cudaCheckError(); } // Check for convergence if(fabs(residualPrev-(*residual_host))/n < tol) { ++iter; break; } } // Warning if k-means has failed to converge if(fabs(residualPrev-(*residual_host))/n >= tol) WARNING("k-means failed to converge"); *iters_host = iter; return NVGRAPH_OK; } /// Find clusters with k-means algorithm /** Initial centroids are chosen with k-means++ algorithm. Empty * clusters are reinitialized by choosing new centroids with * k-means++ algorithm. * * CNMEM must be initialized before calling this function. * * @param n Number of observation vectors. * @param d Dimension of observation vectors. * @param k Number of clusters. * @param tol Tolerance for convergence. k-means stops when the * change in residual divided by n is less than tol. * @param maxiter Maximum number of k-means iterations. * @param obs (Input, device memory, d*n entries) Observation * matrix. Matrix is stored column-major and each column is an * observation vector. Matrix dimensions are d x n. * @param codes (Output, device memory, n entries) Cluster * assignments. * @param residual On exit, residual sum of squares (sum of squares * of distances between observation vectors and centroids). * @param On exit, number of k-means iterations. 
* @return NVGRAPH error flag */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR kmeans(IndexType_ n, IndexType_ d, IndexType_ k, ValueType_ tol, IndexType_ maxiter, const ValueType_ * __restrict__ obs, IndexType_ * __restrict__ codes, ValueType_ & residual, IndexType_ & iters) { // Check that parameters are valid if(n < 1) { WARNING("invalid parameter (n<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(d < 1) { WARNING("invalid parameter (d<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(k < 1) { WARNING("invalid parameter (k<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxiter < 0) { WARNING("invalid parameter (maxiter<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Allocate memory // TODO: handle non-zero CUDA streams cudaStream_t stream = 0; Vector<IndexType_> clusterSizes(k, stream); Vector<ValueType_> centroids(d*k, stream); Vector<ValueType_> work(n*max(k,d), stream); Vector<IndexType_> work_int(2*d*n, stream); // Perform k-means return kmeans<IndexType_,ValueType_>(n, d, k, tol, maxiter, obs, codes, clusterSizes.raw(), centroids.raw(), work.raw(), work_int.raw(), &residual, &iters); } // ========================================================= // Explicit instantiations // ========================================================= template NVGRAPH_ERROR kmeans<int, float>(int n, int d, int k, float tol, int maxiter, const float * __restrict__ obs, int * __restrict__ codes, float & residual, int & iters); template NVGRAPH_ERROR kmeans<int, double>(int n, int d, int k, double tol, int maxiter, const double * __restrict__ obs, int * __restrict__ codes, double & residual, int & iters); } //#endif //NVGRAPH_PARTITION //#endif //debug
0
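chooseNewCentroid above implements the k-means++ selection rule with an inclusive scan of the current distances followed by a lower_bound against distsSum*rand, so each observation is picked with probability proportional to its squared distance to the closest centroid. The sketch below is a hypothetical host-side analogue of that rule, not library code, using std::partial_sum and std::lower_bound in place of the thrust calls:

// Hypothetical CPU sketch of the k-means++ selection used by chooseNewCentroid:
// pick index i with probability dists[i] / sum(dists), via a prefix sum and a
// binary search against sum * rand, where rand is uniform in [0,1).
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <random>
#include <vector>

static int choose_next_centroid_cpu(const std::vector<double>& dists, double rand01) {
  std::vector<double> cumsum(dists.size());
  std::partial_sum(dists.begin(), dists.end(), cumsum.begin());  // inclusive scan
  const double total = cumsum.back();
  auto it = std::lower_bound(cumsum.begin(), cumsum.end(), total * rand01);
  int idx = static_cast<int>(it - cumsum.begin());
  // Clamp to a valid index, mirroring the max/min guards in the GPU code.
  return std::min(std::max(idx, 0), static_cast<int>(dists.size()) - 1);
}

int main() {
  std::vector<double> dists = {0.0, 4.0, 1.0, 9.0};  // squared distances to the closest centroid
  std::mt19937 rng(123456);
  std::uniform_real_distribution<double> u(0.0, 1.0);
  int counts[4] = {0, 0, 0, 0};
  for (int t = 0; t < 140000; ++t) ++counts[choose_next_centroid_cpu(dists, u(rng))];
  // Expect the draws to split roughly as 0 : 4/14 : 1/14 : 9/14.
  for (int i = 0; i < 4; ++i) std::printf("obs %d chosen %d times\n", i, counts[i]);
  return 0;
}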
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/pagerank.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#define NEW_CSRMV #include "valued_csr_graph.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_cusparse.hxx" #include "nvgraph_cublas.hxx" #include "nvgraph_error.hxx" #include "pagerank.hxx" #include "pagerank_kernels.hxx" #ifdef NEW_CSRMV #include "csrmv_cub.h" #include "cub_semiring/cub.cuh" #endif #include "nvgraph_csrmv.hxx" #include <algorithm> #include <iomanip> #include "debug_macros.h" #ifdef DEBUG #define PR_VERBOSE #endif namespace nvgraph { template <typename IndexType_, typename ValueType_> Pagerank<IndexType_, ValueType_>::Pagerank(const ValuedCsrGraph <IndexType, ValueType>& network, Vector<ValueType>& dangling_nodes, cudaStream_t stream) :m_network(network), m_a(dangling_nodes), m_stream(stream) { // initialize cuda libs outside of the solve (this is slow) Cusparse::get_handle(); Cublas::get_handle(); m_residual = 1000.0; m_damping_factor = 0.0; } template <typename IndexType_, typename ValueType_> void Pagerank<IndexType_, ValueType_>::setup(ValueType damping_factor, Vector<ValueType>& initial_guess, Vector<ValueType>& pagerank_vector) { int n = static_cast<int>(m_network.get_num_vertices()); // int nnz = static_cast<int>(m_network.get_num_edges()); #ifdef DEBUG if (n != static_cast<int>(initial_guess.get_size()) || n != static_cast<int>(m_a.get_size()) || n != static_cast<int>(pagerank_vector.get_size())) { CERR() << "n : " << n << std::endl; CERR() << "m_network.get_num_edges() " << m_network.get_num_edges() << std::endl; CERR() << "m_a : " << m_a.get_size() << std::endl; CERR() << "initial_guess.get_size() : " << initial_guess.get_size() << std::endl; CERR() << "pagerank_vector.get_size() : " << pagerank_vector.get_size() << std::endl; FatalError("Wrong input vector in Pagerank solver.", NVGRAPH_ERR_BAD_PARAMETERS); } #endif if (damping_factor > 0.999 || damping_factor < 0.0001) FatalError("Wrong damping factor value in Pagerank solver.", NVGRAPH_ERR_BAD_PARAMETERS); m_damping_factor = damping_factor; m_tmp = initial_guess; m_pagerank = pagerank_vector; //dump(m_a.raw(), 100, 0); update_dangling_nodes(n, m_a.raw(), this->m_damping_factor, m_stream); //dump(m_a.raw(), 100, 0); m_b.allocate(n, m_stream); //m_b.dump(0,n); ValueType_ val = static_cast<ValueType_>( 1.0/n); //fill_raw_vec(m_b.raw(), n, val); // auto b = m_b.raw(); m_b.fill(val, m_stream); // WARNING force initialization of the initial guess //fill(m_tmp.raw(), n, 1.1); } template <typename IndexType_, typename ValueType_> bool Pagerank<IndexType_, ValueType_>::solve_it() { int n = static_cast<int>(m_network.get_num_vertices()), nnz = static_cast<int>(m_network.get_num_edges()); int inc = 1; ValueType_ dot_res; ValueType *a = m_a.raw(), *b = m_b.raw(), *pr = m_pagerank.raw(), *tmp = m_tmp.raw(); // normalize the input vector (tmp) if(m_iterations == 0) Cublas::scal(n, (ValueType_)1.0/Cublas::nrm2(n, tmp, inc) , tmp, inc); //spmv : pr = network * tmp #ifdef NEW_CSRMV ValueType_ alpha = 
cub_semiring::cub::PlusTimesSemiring<ValueType_>::times_ident(); // 1. ValueType_ beta = cub_semiring::cub::PlusTimesSemiring<ValueType_>::times_null(); // 0. SemiringDispatch<IndexType_, ValueType_>::template Dispatch< cub_semiring::cub::PlusTimesSemiring<ValueType_> >( m_network.get_raw_values(), m_network.get_raw_row_offsets(), m_network.get_raw_column_indices(), tmp, pr, alpha, beta, n, n, nnz, m_stream); #else ValueType_ alpha = 1.0, beta =0.0; #if __cplusplus > 199711L Semiring SR = Semiring::PlusTimes; #else Semiring SR = PlusTimes; #endif csrmv_mp<IndexType_, ValueType_>(n, n, nnz, alpha, m_network, tmp, beta, pr, SR, m_stream); #endif // Rank one updates Cublas::scal(n, m_damping_factor, pr, inc); Cublas::dot(n, a, inc, tmp, inc, &dot_res); Cublas::axpy(n, dot_res, b, inc, pr, inc); // CVG check // we need to normalize pr to compare it to tmp // (tmp has been normalized and overwitted at the beginning) Cublas::scal(n, (ValueType_)1.0/Cublas::nrm2(n, pr, inc) , pr, inc); // v = v - x Cublas::axpy(n, (ValueType_)-1.0, pr, inc, tmp, inc); m_residual = Cublas::nrm2(n, tmp, inc); if (m_residual < m_tolerance) // We know lambda = 1 for Pagerank { // CONVERGED // WARNING Norm L1 is more standard for the output of PageRank //m_pagerank.dump(0,m_pagerank.get_size()); Cublas::scal(m_pagerank.get_size(), (ValueType_)1.0/m_pagerank.nrm1(m_stream), pr, inc); return true; } else { // m_pagerank.dump(0,m_pagerank.get_size()); std::swap(m_pagerank, m_tmp); return false; } } template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR Pagerank<IndexType_, ValueType_>::solve(ValueType damping_factor, Vector<ValueType>& initial_guess, Vector<ValueType>& pagerank_vector, float tolerance, int max_it) { #ifdef PR_VERBOSE std::stringstream ss; ss.str(std::string()); size_t used_mem, free_mem, total_mem; ss <<" ------------------PageRank------------------"<< std::endl; ss <<" --------------------------------------------"<< std::endl; ss << std::setw(10) << "Iteration" << std::setw(20) << " Mem Usage (MB)" << std::setw(15) << "Residual" << std::endl; ss <<" --------------------------------------------"<< std::endl; COUT()<<ss.str(); cuda_timer timer; timer.start(); #endif m_max_it = max_it; m_tolerance = static_cast<ValueType_>(tolerance); setup(damping_factor, initial_guess, pagerank_vector); bool converged = false; int i = 0; while (!converged && i < m_max_it) { m_iterations = i; converged = solve_it(); i++; #ifdef PR_VERBOSE ss.str(std::string()); cnmemMemGetInfo(&free_mem, &total_mem, NULL); used_mem=total_mem-free_mem; ss << std::setw(10) << i ; ss.precision(3); ss << std::setw(20) << std::fixed << used_mem/1024.0/1024.0; ss << std::setw(15) << std::scientific << m_residual << std::endl; COUT()<<ss.str(); #endif } m_iterations = i; #ifdef PR_VERBOSE COUT() <<" --------------------------------------------"<< std::endl; //stop timer COUT() <<" Total Time : "<< timer.stop() << "ms"<<std::endl; COUT() <<" --------------------------------------------"<< std::endl; #endif if (converged) { pagerank_vector = m_pagerank; } else { // still return something even if we didn't converged Cublas::scal(m_pagerank.get_size(), (ValueType_)1.0/m_tmp.nrm1(m_stream), m_tmp.raw(), 1); pagerank_vector = m_tmp; } //m_pagerank.dump(0,m_pagerank.get_size()); //pagerank_vector.dump(0,pagerank_vector.get_size()); return converged ? 
NVGRAPH_OK : NVGRAPH_ERR_NOT_CONVERGED; } template class Pagerank<int, double>; template class Pagerank<int, float>; // init : // We actually need the transpose (=converse =reverse) of the original network, if the input is the original network then we have to transpose it // b is a constant and uniform vector, b = 1.0/num_vertices // a is a constant vector that initially stores the dangling nodes then we set : a = alpha*a + (1-alpha)e // pagerank is 0 // tmp is random // alpha is a constant scalar (0.85 usually) //loop : // pagerank = csrmv (network, tmp) // scal(pagerank, alpha); //pagerank = alpha*pagerank // gamma = dot(a, tmp); //gamma = a*tmp // pagerank = axpy(b, pagerank, gamma); // pagerank = pagerank+gamma*b // convergence check // tmp = axpby(pagerank, tmp, -1, 1); // tmp = pagerank - tmp // residual_norm = norm(tmp); // if converged (residual_norm) // l1 = l1_norm(pagerank); // pagerank = scal(pagerank, 1/l1); // return pagerank // swap(tmp, pagerank) //end loop } // end namespace nvgraph
0
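The trailing comment block summarizes one damped power-iteration step, pr = alpha*(A^T x) + (a . x)*b, where b is the uniform vector 1/n and a folds the dangling-node and teleport mass together (a = alpha*dangling + (1-alpha)*e). The sketch below is a hypothetical CPU illustration of that update on a 3-vertex graph; for readability it keeps the iterate normalized in L1, whereas the solver above normalizes with the L2 norm inside the loop and only rescales the final vector to unit L1 norm:

// Hypothetical CPU sketch of the update described in the comment block:
//   pr = alpha * (A^T * x) + (a . x) * b,  with b = 1/n and a = alpha*dangling + (1-alpha)*e.
#include <cstdio>
#include <vector>

int main() {
  const int n = 3;
  const double alpha = 0.85;
  // Row-major dense stand-in for the transposed, out-degree-normalized adjacency:
  // edges 0->1, 0->2, 1->2; vertex 2 is dangling (no outgoing edges).
  double At[n][n] = {{0.0, 0.0, 0.0},
                     {0.5, 0.0, 0.0},
                     {0.5, 1.0, 0.0}};
  std::vector<double> dangling = {0.0, 0.0, 1.0};
  std::vector<double> x(n, 1.0 / n), pr(n, 0.0);

  for (int iter = 0; iter < 50; ++iter) {
    double gamma = 0.0;
    for (int i = 0; i < n; ++i)
      gamma += (alpha * dangling[i] + (1.0 - alpha)) * x[i];  // gamma = a . x
    for (int i = 0; i < n; ++i) {
      double axi = 0.0;
      for (int j = 0; j < n; ++j) axi += At[i][j] * x[j];     // (A^T x)_i
      pr[i] = alpha * axi + gamma / n;                        // rank-one update with b = 1/n
    }
    x = pr;  // the update preserves sum(x) == 1, so no renormalization is needed here
  }
  for (int i = 0; i < n; ++i) std::printf("pagerank[%d] = %f\n", i, x[i]);
  return 0;
}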
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/matrix.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#ifdef NVGRAPH_PARTITION //#ifdef DEBUG #include "matrix.hxx" #include <thrust/device_vector.h> #include <thrust/transform.h> #include "nvgraph_error.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_cublas.hxx" #include "nvgraph_cusparse.hxx" #include "debug_macros.h" // ========================================================= // Useful macros // ========================================================= // CUDA block size #define BLOCK_SIZE 1024 // Get index of matrix entry #define IDX(i,j,lda) ((i)+(j)*(lda)) namespace nvgraph { // ============================================= // CUDA kernels // ============================================= namespace { /// Apply diagonal matrix to vector template <typename IndexType_, typename ValueType_> static __global__ void diagmv(IndexType_ n, ValueType_ alpha, const ValueType_ * __restrict__ D, const ValueType_ * __restrict__ x, ValueType_ * __restrict__ y) { IndexType_ i = threadIdx.x + blockIdx.x*blockDim.x; while(i<n) { y[i] += alpha*D[i]*x[i]; i += blockDim.x*gridDim.x; } } /// Apply diagonal matrix to a set of dense vectors (tall matrix) template <typename IndexType_, typename ValueType_, bool beta_is_zero> static __global__ void diagmm(IndexType_ n, IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ D, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) { IndexType_ i,j,index; for(j=threadIdx.y+blockIdx.y*blockDim.y; j<k; j+=blockDim.y*gridDim.y) { for(i=threadIdx.x+blockIdx.x*blockDim.x; i<n; i+=blockDim.x*gridDim.x) { index = i+j*n; if (beta_is_zero) { y[index] = alpha*D[i]*x[index]; } else { y[index] = alpha*D[i]*x[index] + beta*y[index]; } } } } } // ============================================= // Dense matrix class // ============================================= /// Constructor for dense matrix class /** @param _trans Whether to transpose matrix. * @param _m Number of rows. * @param _n Number of columns. * @param _A (Input, device memory, _m*_n entries) Matrix * entries, stored column-major. * @param _lda Leading dimension of _A. 
*/ template <typename IndexType_, typename ValueType_> DenseMatrix<IndexType_,ValueType_> ::DenseMatrix(bool _trans, IndexType_ _m, IndexType_ _n, const ValueType_ * _A, IndexType_ _lda) : Matrix<IndexType_,ValueType_>(_m,_n), trans(_trans), A(_A), lda(_lda) { Cublas::set_pointer_mode_host(); if(_lda<_m) FatalError("invalid dense matrix parameter (lda<m)", NVGRAPH_ERR_BAD_PARAMETERS); } /// Destructor for dense matrix class template <typename IndexType_, typename ValueType_> DenseMatrix<IndexType_,ValueType_>::~DenseMatrix() {} /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_,ValueType_> ::setCUDAStream(cudaStream_t _s) { this->s = _s; //printf("DenseMatrix setCUDAStream stream=%p\n",this->s); Cublas::setStream(_s); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_,ValueType_> ::getCUDAStream(cudaStream_t *_s) { *_s = this->s; //CHECK_CUBLAS(cublasGetStream(cublasHandle, _s)); } /// Matrix-vector product for dense matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_,ValueType_> ::mv(ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { Cublas::gemv(this->trans, this->m, this->n, &alpha, this->A, this->lda, x, 1, &beta, y, 1); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_,ValueType_> ::mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { Cublas::gemm(this->trans, false, this->m, k, this->n, &alpha, A, lda, x, this->m, &beta, y, this->n); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_,ValueType_> ::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_,ValueType_> ::reorder(IndexType_ *p) const { } /// Incomplete Cholesky (setup, factor and solve) template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_,ValueType_> ::prec_setup(Matrix<IndexType_,ValueType_> * _M) { printf("ERROR: DenseMatrix prec_setup dispacthed\n"); //exit(1); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_,ValueType_> ::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const { printf("ERROR: DenseMatrix prec_solve dispacthed\n"); //exit(1); } template <typename IndexType_, typename ValueType_> ValueType_ DenseMatrix<IndexType_, ValueType_> ::getEdgeSum() const { return 0.0; } // ============================================= // CSR matrix class // ============================================= /// Constructor for CSR matrix class /** @param _transA Whether to transpose matrix. * @param _m Number of rows. * @param _n Number of columns. * @param _nnz Number of non-zero entries. * @param _descrA Matrix properties. * @param _csrValA (Input, device memory, _nnz entries) Matrix * entry values. * @param _csrRowPtrA (Input, device memory, _m+1 entries) Pointer * to first entry in each row. * @param _csrColIndA (Input, device memory, _nnz entries) Column * index of each matrix entry. 
*/ template <typename IndexType_, typename ValueType_> CsrMatrix<IndexType_,ValueType_> ::CsrMatrix(bool _trans, bool _sym, IndexType_ _m, IndexType_ _n, IndexType_ _nnz, const cusparseMatDescr_t _descrA, /*const*/ ValueType_ * _csrValA, const IndexType_ * _csrRowPtrA, const IndexType_ * _csrColIndA) : Matrix<IndexType_,ValueType_>(_m,_n), trans(_trans), sym(_sym), nnz(_nnz), descrA(_descrA), csrValA(_csrValA), csrRowPtrA(_csrRowPtrA), csrColIndA(_csrColIndA) { if(nnz<0) FatalError("invalid CSR matrix parameter (nnz<0)", NVGRAPH_ERR_BAD_PARAMETERS); Cusparse::set_pointer_mode_host(); } /// Constructor for CSR matrix class /** @param G Weighted graph in CSR format */ template <typename IndexType_, typename ValueType_> CsrMatrix<IndexType_,ValueType_> ::CsrMatrix( ValuedCsrGraph<IndexType_,ValueType_> & G, const cusparseMatDescr_t _descrA) : Matrix<IndexType_,ValueType_>(G.get_num_vertices(), G.get_num_vertices()), trans(false), sym(false), nnz(G.get_num_edges()), descrA(_descrA), csrValA(G.get_raw_values()), csrRowPtrA(G.get_raw_row_offsets()), csrColIndA(G.get_raw_column_indices()) { Cusparse::set_pointer_mode_host(); } /// Destructor for CSR matrix class template <typename IndexType_, typename ValueType_> CsrMatrix<IndexType_,ValueType_>::~CsrMatrix() {} /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_,ValueType_> ::setCUDAStream(cudaStream_t _s) { this->s = _s; //printf("CsrMatrix setCUDAStream stream=%p\n",this->s); Cusparse::setStream(_s); } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_,ValueType_> ::getCUDAStream(cudaStream_t *_s) { *_s = this->s; //CHECK_CUSPARSE(cusparseGetStream(Cusparse::get_handle(), _s)); } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_,ValueType_> ::mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { //CHECK_CUSPARSE(cusparseXcsrmm(Cusparse::get_handle(), transA, this->m, k, this->n, nnz, &alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, this->n, &beta, y, this->m)); Cusparse::csrmm(this->trans, this->sym, this->m, k, this->n, this->nnz, &alpha, this->csrValA, this->csrRowPtrA, this->csrColIndA, x, this->n, &beta, y, this->m); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_,ValueType_> ::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_,ValueType_> ::reorder(IndexType_ *p) const { } /// Incomplete Cholesky (setup, factor and solve) template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_,ValueType_> ::prec_setup(Matrix<IndexType_,ValueType_> * _M) { //printf("CsrMatrix prec_setup dispacthed\n"); if (!factored) { //analyse lower triangular factor CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_l)); CHECK_CUSPARSE(cusparseSetMatFillMode(descrA,CUSPARSE_FILL_MODE_LOWER)); CHECK_CUSPARSE(cusparseSetMatDiagType(descrA,CUSPARSE_DIAG_TYPE_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_analysis(Cusparse::get_handle(),CUSPARSE_OPERATION_NON_TRANSPOSE,this->m,nnz,descrA,csrValA,csrRowPtrA,csrColIndA,info_l)); //analyse upper triangular factor CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_u)); CHECK_CUSPARSE(cusparseSetMatFillMode(descrA,CUSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(cusparseSetMatDiagType(descrA,CUSPARSE_DIAG_TYPE_NON_UNIT)); 
      CHECK_CUSPARSE(cusparseXcsrsm_analysis(Cusparse::get_handle(),CUSPARSE_OPERATION_NON_TRANSPOSE,this->m,nnz,descrA,csrValA,csrRowPtrA,csrColIndA,info_u));
      //perform csrilu0 (should be slightly faster than csric0)
      CHECK_CUSPARSE(cusparseXcsrilu0(Cusparse::get_handle(),CUSPARSE_OPERATION_NON_TRANSPOSE,this->m,descrA,csrValA,csrRowPtrA,csrColIndA,info_l));
      //set factored flag to true
      factored=true;
  }
}

template <typename IndexType_, typename ValueType_>
void CsrMatrix<IndexType_,ValueType_>
::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const {
  //printf("CsrMatrix prec_solve dispatched (stream %p)\n",this->s);
  //preconditioning Mx=f (where M = L*U, therefore x=U\(L\f))
  //solve lower triangular factor
  CHECK_CUSPARSE(cusparseSetMatFillMode(descrA,CUSPARSE_FILL_MODE_LOWER));
  CHECK_CUSPARSE(cusparseSetMatDiagType(descrA,CUSPARSE_DIAG_TYPE_UNIT));
  CHECK_CUSPARSE(cusparseXcsrsm_solve(Cusparse::get_handle(),CUSPARSE_OPERATION_NON_TRANSPOSE,this->m,k,alpha,descrA,csrValA,csrRowPtrA,csrColIndA,info_l,fx,this->m,t,this->m));
  //solve upper triangular factor
  CHECK_CUSPARSE(cusparseSetMatFillMode(descrA,CUSPARSE_FILL_MODE_UPPER));
  CHECK_CUSPARSE(cusparseSetMatDiagType(descrA,CUSPARSE_DIAG_TYPE_NON_UNIT));
  CHECK_CUSPARSE(cusparseXcsrsm_solve(Cusparse::get_handle(),CUSPARSE_OPERATION_NON_TRANSPOSE,this->m,k,alpha,descrA,csrValA,csrRowPtrA,csrColIndA,info_u,t,this->m,fx,this->m));
}

/// Matrix-vector product for CSR matrix class
/** y is overwritten with alpha*A*x+beta*y.
 *
 *  @param alpha Scalar.
 *  @param x (Input, device memory, n entries) Vector.
 *  @param beta Scalar.
 *  @param y (Input/output, device memory, m entries) Output vector.
 */
template <typename IndexType_, typename ValueType_>
void CsrMatrix<IndexType_,ValueType_>
::mv(ValueType_ alpha, const ValueType_ * __restrict__ x,
     ValueType_ beta, ValueType_ * __restrict__ y) const {
  // TODO: consider using merge-path csrmv
  Cusparse::csrmv(this->trans, this->sym, this->m, this->n, this->nnz, &alpha, this->csrValA, this->csrRowPtrA, this->csrColIndA, x, &beta, y);
}

template <typename IndexType_, typename ValueType_>
ValueType_ CsrMatrix<IndexType_, ValueType_>
::getEdgeSum() const {
  return 0.0;
}

// =============================================
// Laplacian matrix class
// =============================================

/// Constructor for Laplacian matrix class
/** @param A Adjacency matrix */
template <typename IndexType_, typename ValueType_>
LaplacianMatrix<IndexType_, ValueType_>
::LaplacianMatrix(/*const*/ Matrix<IndexType_,ValueType_> & _A)
  : Matrix<IndexType_,ValueType_>(_A.m,_A.n), A(&_A) {
  // Check that adjacency matrix is square
  if(_A.m != _A.n)
    FatalError("cannot construct Laplacian matrix from non-square adjacency matrix", NVGRAPH_ERR_BAD_PARAMETERS);
  //set CUDA stream
  this->s = NULL;
  // Construct degree matrix
  D.allocate(_A.m,this->s);
  Vector<ValueType_> ones(this->n,this->s);
  ones.fill(1.0);
  _A.mv(1, ones.raw(), 0, D.raw());
  // Set preconditioning matrix pointer to NULL
  M=NULL;
}

/// Destructor for Laplacian matrix class
template <typename IndexType_, typename ValueType_>
LaplacianMatrix<IndexType_, ValueType_>::~LaplacianMatrix() {}

/// Get and Set CUDA stream
template <typename IndexType_, typename ValueType_>
void LaplacianMatrix<IndexType_, ValueType_>::setCUDAStream(cudaStream_t _s) {
  this->s = _s;
  //printf("LaplacianMatrix setCUDAStream stream=%p\n",this->s);
  A->setCUDAStream(_s);
  if (M != NULL) {
    M->setCUDAStream(_s);
  }
}
template <typename IndexType_, typename ValueType_>
void
LaplacianMatrix<IndexType_, ValueType_>::getCUDAStream(cudaStream_t * _s) { *_s = this->s; //A->getCUDAStream(_s); } /// Matrix-vector product for Laplacian matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_> ::mv(ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { // Scale result vector if(beta==0) CHECK_CUDA(cudaMemset(y, 0, (this->n)*sizeof(ValueType_))) else if(beta!=1) thrust::transform(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y+this->n), thrust::make_constant_iterator(beta), thrust::device_pointer_cast(y), thrust::multiplies<ValueType_>()); // Apply diagonal matrix dim3 gridDim, blockDim; gridDim.x = min(((this->n)+BLOCK_SIZE-1)/BLOCK_SIZE, 65535); gridDim.y = 1; gridDim.z = 1; blockDim.x = BLOCK_SIZE; blockDim.y = 1; blockDim.z = 1; diagmv <<< gridDim, blockDim , 0, A->s>>> (this->n, alpha, D.raw(), x, y); cudaCheckError(); // Apply adjacency matrix A->mv(-alpha, x, 1, y); } /// Matrix-vector product for Laplacian matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n*k entries) nxk dense matrix. * @param beta Scalar. * @param y (Input/output, device memory, m*k entries) Output mxk dense matrix. */ template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_> ::mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { // Apply diagonal matrix ValueType_ one = (ValueType_)1.0; this->dm(k,alpha,x,beta,y); // Apply adjacency matrix A->mm(k, -alpha, x, one, y); } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_> ::dm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { IndexType_ t = k*(this->n); dim3 gridDim, blockDim; //setup launch parameters gridDim.x = min(((this->n)+BLOCK_SIZE-1)/BLOCK_SIZE, 65535); gridDim.y = min(k,65535); gridDim.z = 1; blockDim.x = BLOCK_SIZE; blockDim.y = 1; blockDim.z = 1; // Apply diagonal matrix if(beta == 0.0) { //set vectors to 0 (WARNING: notice that you need to set, not scale, because of NaNs corner case) CHECK_CUDA(cudaMemset(y, 0, t*sizeof(ValueType_))); diagmm<IndexType_,ValueType_,true> <<< gridDim, blockDim, 0, A->s >>> (this->n, k, alpha, D.raw(), x, beta, y); } else { diagmm<IndexType_,ValueType_,false><<< gridDim, blockDim, 0, A->s >>> (this->n, k, alpha, D.raw(), x, beta, y); } cudaCheckError(); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_,ValueType_> ::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_,ValueType_> ::reorder(IndexType_ *p) const { } /// Solve preconditioned system M x = f for a set of k vectors template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_> ::prec_setup(Matrix<IndexType_,ValueType_> * _M) { //save the pointer to preconditioner M M = _M; if (M != NULL) { //setup the preconditioning matrix M M->prec_setup(NULL); } } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_> 
::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const { if (M != NULL) { //preconditioning M->prec_solve(k,alpha,fx,t); } } template <typename IndexType_, typename ValueType_> ValueType_ LaplacianMatrix<IndexType_, ValueType_> ::getEdgeSum() const { return 0.0; } // ============================================= // Modularity matrix class // ============================================= /// Constructor for Modularity matrix class /** @param A Adjacency matrix */ template <typename IndexType_, typename ValueType_> ModularityMatrix<IndexType_, ValueType_> ::ModularityMatrix(/*const*/ Matrix<IndexType_,ValueType_> & _A, IndexType_ _nnz) : Matrix<IndexType_,ValueType_>(_A.m,_A.n), A(&_A), nnz(_nnz){ // Check that adjacency matrix is square if(_A.m != _A.n) FatalError("cannot construct Modularity matrix from non-square adjacency matrix", NVGRAPH_ERR_BAD_PARAMETERS); //set CUDA stream this->s = NULL; // Construct degree matrix D.allocate(_A.m,this->s); Vector<ValueType_> ones(this->n,this->s); ones.fill(1.0); _A.mv(1, ones.raw(), 0, D.raw()); // D.dump(0,this->n); edge_sum = D.nrm1(); // Set preconditioning matrix pointer to NULL M=NULL; } /// Destructor for Modularity matrix class template <typename IndexType_, typename ValueType_> ModularityMatrix<IndexType_, ValueType_>::~ModularityMatrix() {} /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::setCUDAStream(cudaStream_t _s) { this->s = _s; //printf("ModularityMatrix setCUDAStream stream=%p\n",this->s); A->setCUDAStream(_s); if (M != NULL) { M->setCUDAStream(_s); } } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::getCUDAStream(cudaStream_t * _s) { *_s = this->s; //A->getCUDAStream(_s); } /// Matrix-vector product for Modularity matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_> ::mv(ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { // Scale result vector if(alpha!=1 || beta!=0) FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); //CHECK_CUBLAS(cublasXdot(handle, this->n, const double *x, int incx, const double *y, int incy, double *result)); // y = A*x A->mv(alpha, x, 0, y); ValueType_ dot_res; //gamma = d'*x Cublas::dot(this->n, D.raw(), 1, x, 1, &dot_res); // y = y -(gamma/edge_sum)*d Cublas::axpy(this->n, -(dot_res/this->edge_sum), D.raw(), 1, y, 1); } /// Matrix-vector product for Modularity matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n*k entries) nxk dense matrix. * @param beta Scalar. * @param y (Input/output, device memory, m*k entries) Output mxk dense matrix. 
*/ template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_> ::mm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_> ::dm(IndexType_ k, ValueType_ alpha, const ValueType_ * __restrict__ x, ValueType_ beta, ValueType_ * __restrict__ y) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_,ValueType_> ::color(IndexType_ *c, IndexType_ *p) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_,ValueType_> ::reorder(IndexType_ *p) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } /// Solve preconditioned system M x = f for a set of k vectors template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_> ::prec_setup(Matrix<IndexType_,ValueType_> * _M) { //save the pointer to preconditioner M M = _M; if (M != NULL) { //setup the preconditioning matrix M M->prec_setup(NULL); } } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_> ::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ * __restrict__ fx, ValueType_ * __restrict__ t) const { if (M != NULL) { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } } template <typename IndexType_, typename ValueType_> ValueType_ ModularityMatrix<IndexType_, ValueType_> ::getEdgeSum() const { return edge_sum; } // Explicit instantiation template class Matrix<int,float>; template class Matrix<int, double>; template class DenseMatrix<int,float>; template class DenseMatrix<int,double>; template class CsrMatrix<int,float>; template class CsrMatrix<int,double>; template class LaplacianMatrix<int,float>; template class LaplacianMatrix<int,double>; template class ModularityMatrix<int,float>; template class ModularityMatrix<int,double>; } //#endif
0
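Neither LaplacianMatrix nor ModularityMatrix above materializes L = D - A or B = A - d*d'/(2m); both compose the product from a diagonal (or dot-product) term plus a call to the adjacency operator. The following host-side sketch is illustrative only (plain C++, no nvgraph types; the toy graph and vectors are made up for the example) and spells out the same arithmetic so the composition in mv() is easy to check:

// Illustrative host-side sketch (not nvgraph code): the arithmetic that
// LaplacianMatrix::mv and ModularityMatrix::mv compose on the GPU, written out
// on a tiny dense adjacency matrix.
#include <cstdio>

int main() {
  const int n = 4;
  // Symmetric adjacency matrix of a toy graph with edges 0-1, 0-2, 1-2, 1-3.
  const double A[4][4] = {{0, 1, 1, 0},
                          {1, 0, 1, 1},
                          {1, 1, 0, 0},
                          {0, 1, 0, 0}};
  double d[4];                     // degree vector
  double x[4] = {1, -1, 2, 0};     // arbitrary input vector
  double y[4] = {0, 0, 0, 0};
  double edge_sum = 0;             // ||d||_1 = 2 * (#edges)

  // d = A * ones, exactly how the Laplacian/modularity constructors build D.
  for (int i = 0; i < n; ++i) {
    d[i] = 0;
    for (int j = 0; j < n; ++j) d[i] += A[i][j];
    edge_sum += d[i];
  }

  // Laplacian product y = alpha*(D - A)*x + beta*y, composed as in
  // LaplacianMatrix::mv: diagonal scaling first, then y += -alpha*A*x.
  const double alpha = 1.0, beta = 0.0;
  for (int i = 0; i < n; ++i) {
    y[i] = beta * y[i] + alpha * d[i] * x[i];                    // diagmv step
    for (int j = 0; j < n; ++j) y[i] -= alpha * A[i][j] * x[j];  // A->mv(-alpha, x, 1, y)
  }
  printf("L*x = %g %g %g %g\n", y[0], y[1], y[2], y[3]);         // 1 -6 4 1 (sums to 0)

  // Modularity product y = A*x - (d'*x / edge_sum)*d, as in ModularityMatrix::mv
  // (which only supports alpha = 1, beta = 0).
  double gamma = 0;
  for (int i = 0; i < n; ++i) gamma += d[i] * x[i];
  for (int i = 0; i < n; ++i) {
    y[i] = 0;
    for (int j = 0; j < n; ++j) y[i] += A[i][j] * x[j];
    y[i] -= (gamma / edge_sum) * d[i];
  }
  printf("B*x = %g %g %g %g\n", y[0], y[1], y[2], y[3]);         // 0.25 1.875 -0.75 -1.375
  return 0;
}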
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/triangles_counting_kernels.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #ifdef __cplusplus #define __STDC_LIMIT_MACROS 1 #define __STDC_FORMAT_MACROS 1 #endif #include <cuda.h> #include <assert.h> #include <triangles_counting_defines.hxx> #include <triangles_counting_kernels.hxx> #include <nvgraph_error.hxx> #include "cub/cub.cuh" #include <thrust/iterator/counting_iterator.h> #include "sm_utils.h" using namespace cub; #include "cnmem.h" #define TH_CENT_K_LOCLEN (34) #define WP_LEN_TH1 (24) #define WP_LEN_TH2 (2) #if WP_LEN_TH1 > 32 #error WP_LEN_TH1 must be <= 32! #endif template<typename T> __device__ __forceinline__ T LDG(const T* x) { #if __CUDA_ARCH__ < 350 return *x; #else return __ldg(x); #endif } namespace nvgraph { namespace triangles_counting { // hide behind void* tmp_get(size_t size, cudaStream_t stream) { void *t = NULL; cnmemStatus_t status = cnmemMalloc(&t, size, stream); if (status == CNMEM_STATUS_OUT_OF_MEMORY) { FatalError("Not enough memory", NVGRAPH_ERR_NO_MEMORY); } else if (status != CNMEM_STATUS_SUCCESS) { FatalError("Memory manager internal error (alloc)", NVGRAPH_ERR_UNKNOWN); } return t; } void tmp_release(void* ptr, cudaStream_t stream) { cnmemStatus_t status = cnmemFree(ptr, stream); if (status != CNMEM_STATUS_SUCCESS) { FatalError("Memory manager internal error (release)", NVGRAPH_ERR_UNKNOWN); } } // cub utility wrappers //////////////////////////////////////////////////////// template<typename InputIteratorT, typename OutputIteratorT, typename ReductionOpT, typename T> static inline void cubReduce(InputIteratorT d_in, OutputIteratorT d_out, int num_items, ReductionOpT reduction_op, T init, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, reduction_op, init, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, reduction_op, init, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename OutputIteratorT> static inline void cubSum(InputIteratorT d_in, OutputIteratorT d_out, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename KeyT> static inline void cubSortKeys(KeyT *d_keys_in, KeyT *d_keys_out, int num_items, int begin_bit = 0, int end_bit = sizeof(KeyT) * 8, cudaStream_t stream = 0, bool 
debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items, begin_bit, end_bit, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items, begin_bit, end_bit, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename KeyT, typename ValueT> static inline void cubSortPairs(KeyT *d_keys_in, KeyT *d_keys_out, ValueT *d_values_in, ValueT *d_values_out, int num_items, int begin_bit = 0, int end_bit = sizeof(KeyT) * 8, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, begin_bit, end_bit, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, begin_bit, end_bit, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename KeyT, typename ValueT> static inline void cubSortPairsDescending(KeyT *d_keys_in, KeyT *d_keys_out, ValueT *d_values_in, ValueT *d_values_out, int num_items, int begin_bit = 0, int end_bit = sizeof(KeyT) * 8, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, begin_bit, end_bit, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, begin_bit, end_bit, stream, debug_synchronous); tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT> static inline void cubUnique(InputIteratorT d_in, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename UniqueOutputIteratorT, typename LengthsOutputIteratorT, typename NumRunsOutputIteratorT> static inline void cubEncode(InputIteratorT d_in, UniqueOutputIteratorT d_unique_out, LengthsOutputIteratorT d_counts_out, NumRunsOutputIteratorT d_num_runs_out, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream, debug_synchronous); cudaCheckError() ; 
d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename OutputIteratorT> static inline void cubMin(InputIteratorT d_in, OutputIteratorT d_out, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename OutputIteratorT> static inline void cubMax(InputIteratorT d_in, OutputIteratorT d_out, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT, typename SelectOp> static inline void cubIf(InputIteratorT d_in, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, int num_items, SelectOp select_op, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename FlagIterator, typename OutputIteratorT, typename NumSelectedIteratorT> static inline void cubFlagged(InputIteratorT d_in, FlagIterator d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename OutputIteratorT> static inline void cubExclusiveSum(InputIteratorT d_in, OutputIteratorT d_out, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; 
d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename InputIteratorT, typename OutputIteratorT> static inline void cubInclusiveSum(InputIteratorT d_in, OutputIteratorT d_out, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); cudaCheckError() ; tmp_release(d_temp_storage, stream); return; } template<typename KeysInputIteratorT, typename UniqueOutputIteratorT, typename ValuesInputIteratorT, typename AggregatesOutputIteratorT, typename NumRunsOutputIteratorT, typename ReductionOpT> static inline void cubReduceByKey(KeysInputIteratorT d_keys_in, UniqueOutputIteratorT d_unique_out, ValuesInputIteratorT d_values_in, AggregatesOutputIteratorT d_aggregates_out, NumRunsOutputIteratorT d_num_runs_out, ReductionOpT reduction_op, int num_items, cudaStream_t stream = 0, bool debug_synchronous = false) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items, stream, debug_synchronous); cudaCheckError(); d_temp_storage = tmp_get(temp_storage_bytes, stream); cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items, stream, debug_synchronous); cudaCheckError(); tmp_release(d_temp_storage, stream); return; } template<typename T2> __device__ __host__ inline bool operator==(const T2 &lhs, const T2 &rhs) { return (lhs.x == rhs.x && lhs.y == rhs.y); } ////////////////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ T __block_bcast(const T v, const int x) { __shared__ T shv; __syncthreads(); if (threadIdx.x == x) shv = v; __syncthreads(); return shv; } template<int BDIM_X, int BDIM_Y, int WSIZE, typename T> __device__ __forceinline__ T block_sum(T v) { __shared__ T sh[BDIM_X * BDIM_Y / WSIZE]; const int lid = threadIdx.x % 32; const int wid = threadIdx.x / 32 + ((BDIM_Y > 1) ? threadIdx.y * (BDIM_X / 32) : 0); #pragma unroll for (int i = WSIZE / 2; i; i >>= 1) { v += utils::shfl_down(v, i); } if (lid == 0) sh[wid] = v; __syncthreads(); if (wid == 0) { v = (lid < (BDIM_X * BDIM_Y / WSIZE)) ? 
sh[lid] : 0; #pragma unroll for (int i = (BDIM_X * BDIM_Y / WSIZE) / 2; i; i >>= 1) { v += utils::shfl_down(v, i); } } return v; } ////////////////////////////////////////////////////////////////////////////////////////// template<int BDIM, int WSIZE, int BWL0, typename ROW_T, typename OFF_T, typename CNT_T, typename MAP_T> __global__ void tricnt_b2b_k(const ROW_T ner, const ROW_T *__restrict__ rows, const OFF_T *__restrict__ roff, const ROW_T *__restrict__ cols, CNT_T *__restrict__ ocnt, MAP_T *__restrict__ bmapL0, const size_t bmldL0, MAP_T *__restrict__ bmapL1, const size_t bmldL1) { CNT_T __cnt = 0; bmapL1 += bmldL1 * blockIdx.x; bmapL0 += bmldL0 * blockIdx.x; for (ROW_T bid = blockIdx.x; bid < ner; bid += gridDim.x) { const OFF_T rbeg = roff[rows[bid]]; const OFF_T rend = roff[rows[bid] + 1]; ROW_T firstcol = 0; ROW_T lastcol = 0; for (OFF_T i = rbeg; i < rend; i += BDIM) { const ROW_T c = (i + threadIdx.x < rend) ? cols[i + threadIdx.x] : -1; __syncthreads(); if (c > -1) { atomicOr(bmapL1 + c / BITSOF(bmapL1), ((MAP_T) 1) << (c % BITSOF(bmapL1))); atomicOr(bmapL0 + c / BWL0 / BITSOF(bmapL0), ((MAP_T) 1) << ((c / BWL0) % BITSOF(bmapL0))); } __syncthreads(); #pragma unroll for (int j = 0; j < BDIM; j++) { const ROW_T curc = __block_bcast(c, j); if (curc == -1) break; lastcol = curc; if ((i == rbeg) && !j) { firstcol = curc; continue; } const OFF_T soff = roff[curc]; const OFF_T eoff = roff[curc + 1]; for (OFF_T k = eoff - 1; k >= soff; k -= BDIM) { if (k - (int) threadIdx.x < soff) break; const ROW_T cc = LDG(cols + k - threadIdx.x); if (cc < firstcol) break; MAP_T mm = ((MAP_T) 1) << ((cc / BWL0) % BITSOF(bmapL0)); if (0 == (bmapL0[cc / BWL0 / BITSOF(bmapL0)] & mm)) continue; mm = ((MAP_T) 1) << (cc % BITSOF(bmapL1)); if (bmapL1[cc / BITSOF(bmapL1)] & mm) { __cnt++; } } } } lastcol /= 64; firstcol /= 64; __syncthreads(); for (int i = rbeg; i < rend; i += BDIM) { if (i + threadIdx.x < rend) { ROW_T c = cols[i + threadIdx.x]; bmapL1[c / BITSOF(bmapL1)] = 0; bmapL0[c / BWL0 / BITSOF(bmapL0)] = 0; } } __syncthreads(); } __cnt = block_sum<BDIM, 1, WSIZE>(__cnt); if (threadIdx.x == 0) ocnt[blockIdx.x] = __cnt; return; } template<typename T> void tricnt_b2b(T nblock, spmat_t<T> *m, uint64_t *ocnt_d, unsigned int *bmapL0_d, size_t bmldL0, unsigned int *bmapL1_d, size_t bmldL1, cudaStream_t stream) { // still best overall (with no psum) tricnt_b2b_k<THREADS, 32, BLK_BWL0> <<<nblock, THREADS, 0, stream>>>(m->nrows, m->rows_d, m->roff_d, m->cols_d, ocnt_d, bmapL0_d, bmldL0, bmapL1_d, bmldL1); cudaCheckError() ; return; } ////////////////////////////////////////////////////////////////////////////////////////// template<int BDIM_X, int BDIM_Y, int WSIZE, typename T> __device__ __forceinline__ T block_sum_sh(T v, T *sh) { const int lid = threadIdx.x % 32; const int wid = threadIdx.x / 32 + ((BDIM_Y > 1) ? threadIdx.y * (BDIM_X / 32) : 0); #pragma unroll for (int i = WSIZE / 2; i; i >>= 1) { v += utils::shfl_down(v, i); } if (lid == 0) sh[wid] = v; __syncthreads(); if (wid == 0) { v = (lid < (BDIM_X * BDIM_Y / WSIZE)) ? 
sh[lid] : 0; #pragma unroll for (int i = (BDIM_X * BDIM_Y / WSIZE) / 2; i; i >>= 1) { v += utils::shfl_down(v, i); } } return v; } template<int BDIM, int WSIZE, typename ROW_T, typename OFF_T, typename CNT_T> __global__ void tricnt_bsh_k(const ROW_T ner, const ROW_T *__restrict__ rows, const OFF_T *__restrict__ roff, const ROW_T *__restrict__ cols, CNT_T *__restrict__ ocnt, const size_t bmld) { CNT_T __cnt = 0; extern __shared__ unsigned int shm[]; for (int i = 0; i < bmld; i += BDIM) { if (i + threadIdx.x < bmld) { shm[i + threadIdx.x] = 0; } } for (ROW_T bid = blockIdx.x; bid < ner; bid += gridDim.x) { const OFF_T rbeg = roff[rows[bid]]; const OFF_T rend = roff[rows[bid] + 1]; ROW_T firstcol = 0; ROW_T lastcol = 0; for (OFF_T i = rbeg; i < rend; i += BDIM) { const ROW_T c = (i + threadIdx.x < rend) ? cols[i + threadIdx.x] : -1; __syncthreads(); if (c > -1) atomicOr(shm + c / BITSOF(shm), 1u << (c % BITSOF(shm))); __syncthreads(); #pragma unroll for (int j = 0; j < BDIM; j++) { const ROW_T curc = __block_bcast(c, j); if (curc == -1) break; lastcol = curc; if ((i == rbeg) && !j) { firstcol = curc; continue; } const OFF_T soff = roff[curc]; const OFF_T eoff = roff[curc + 1]; for (OFF_T k = eoff - 1; k >= soff; k -= BDIM) { if (k - (int) threadIdx.x < soff) break; const ROW_T cc = LDG(cols + k - threadIdx.x); if (cc < firstcol) break; const unsigned int mm = 1u << (cc % BITSOF(shm)); if (shm[cc / BITSOF(shm)] & mm) { __cnt++; } } } } lastcol /= 64; firstcol /= 64; __syncthreads(); if (lastcol - firstcol < rend - rbeg) { for (int i = firstcol; i <= lastcol; i += BDIM) { if (i + threadIdx.x <= lastcol) { ((unsigned long long *) shm)[i + threadIdx.x] = 0ull; } } } else { for (int i = rbeg; i < rend; i += BDIM) { if (i + threadIdx.x < rend) { shm[cols[i + threadIdx.x] / BITSOF(shm)] = 0; } } } __syncthreads(); } __cnt = block_sum_sh<BDIM, 1, WSIZE>(__cnt, (uint64_t *) shm); if (threadIdx.x == 0) ocnt[blockIdx.x] = __cnt; return; } template<typename T> void tricnt_bsh(T nblock, spmat_t<T> *m, uint64_t *ocnt_d, size_t bmld, cudaStream_t stream) { tricnt_bsh_k<THREADS, 32> <<<nblock, THREADS, sizeof(unsigned int) * bmld, stream>>>(m->nrows, m->rows_d, m->roff_d, m->cols_d, ocnt_d, bmld); cudaCheckError() ; return; } //////////////////////////////////////////////////////////////////////////////////////// template<int WSIZE, int NWARP, int RLEN_THR1, int RLEN_THR2, typename ROW_T, typename OFF_T, typename CNT_T, typename MAP_T> __global__ void tricnt_wrp_ps_k(const ROW_T ner, const ROW_T *__restrict__ rows, const OFF_T *__restrict__ roff, const ROW_T *__restrict__ cols, CNT_T *__restrict__ ocnt, MAP_T *__restrict__ bmap, const size_t bmld) { __shared__ OFF_T sho[NWARP][WSIZE]; __shared__ ROW_T shs[NWARP][WSIZE]; __shared__ ROW_T shc[NWARP][WSIZE]; CNT_T __cnt = 0; ROW_T wid = blockIdx.x * blockDim.y + threadIdx.y; bmap += bmld * wid; for (; wid < ner; wid += gridDim.x * blockDim.y) { const OFF_T rbeg = roff[rows[wid]]; const OFF_T rend = roff[rows[wid] + 1]; //RLEN_THR1 <= 32 if (rend - rbeg <= RLEN_THR1) { const int nloc = rend - rbeg; OFF_T soff; OFF_T eoff; if (threadIdx.x < nloc) { const ROW_T c = cols[rbeg + threadIdx.x]; shc[threadIdx.y][threadIdx.x] = c; soff = roff[c]; eoff = roff[c + 1]; } int mysm = -1; #pragma unroll for (int i = 1; i < RLEN_THR1; i++) { if (i == nloc) break; const OFF_T csoff = utils::shfl(soff, i); const OFF_T ceoff = utils::shfl(eoff, i); if (ceoff - csoff < RLEN_THR2) { if (threadIdx.x == i) mysm = i; continue; } for (OFF_T k = ceoff - 1; k >= csoff; k -= WSIZE) { if 
(k - (int) threadIdx.x < csoff) break; const ROW_T cc = cols[k - threadIdx.x]; if (cc < shc[threadIdx.y][0]) break; for (int j = i - 1; j >= 0; j--) { if (cc == shc[threadIdx.y][j]) { __cnt++; } } } } if (mysm > -1) { for (OFF_T k = eoff - 1; k >= soff; k--) { const ROW_T cc = cols[k]; if (cc < shc[threadIdx.y][0]) break; for (int j = mysm - 1; j >= 0; j--) { if (cc == shc[threadIdx.y][j]) { __cnt++; } } } } } else { ROW_T firstcol = cols[rbeg]; ROW_T lastcol = cols[rend - 1]; for (OFF_T i = rbeg; i < rend; i += 32) { const ROW_T c = (i + threadIdx.x < rend) ? cols[i + threadIdx.x] : -1; if (c > -1) atomicOr(bmap + c / BITSOF(bmap), ((MAP_T) 1) << (c % BITSOF(bmap))); } for (OFF_T i = rbeg; i < rend; i+= 32) { const ROW_T c = (i + threadIdx.x < rend) ? cols[i + threadIdx.x] : -1; sho[threadIdx.y][threadIdx.x] = (c > -1) ? roff[c] : 0; shc[threadIdx.y][threadIdx.x] = c; ROW_T len = (c > -1) ? roff[c + 1] - sho[threadIdx.y][threadIdx.x] : 0; ROW_T lensum = len; #pragma unroll for (int j = 1; j < 32; j <<= 1) { lensum += (threadIdx.x >= j) * (utils::shfl_up(lensum, j)); } shs[threadIdx.y][threadIdx.x] = lensum - len; lensum = utils::shfl(lensum, 31); int k = WSIZE - 1; for (int j = lensum - 1; j >= 0; j -= WSIZE) { if (j < threadIdx.x) break; // bisect-right for (; k >= 0; k--) { if (shs[threadIdx.y][k] <= j - threadIdx.x) break; } const ROW_T cc = LDG(cols + (sho[threadIdx.y][k] + j - threadIdx.x - shs[threadIdx.y][k])); if (cc < shc[threadIdx.y][k]) continue; // if (cc < firstcol) // continue; const MAP_T mm = ((MAP_T) 1) << (cc % BITSOF(bmap)); if (bmap[cc / BITSOF(bmap)] & mm) { __cnt++; } } } lastcol /= 64; firstcol /= 64; if (lastcol - firstcol < rend - rbeg) { for (int i = firstcol; i <= lastcol; i += WSIZE) { if (i + threadIdx.x <= lastcol) { ((unsigned long long *) bmap)[i + threadIdx.x] = 0ull; } } } else { for (int i = rbeg; i < rend; i += WSIZE) { if (i + threadIdx.x < rend) { bmap[cols[i + threadIdx.x] / BITSOF(bmap)] = 0; } } } } } __syncthreads(); __cnt = block_sum<WSIZE, NWARP, WSIZE>(__cnt); if (threadIdx.x == 0 && threadIdx.y == 0) { ocnt[blockIdx.x] = __cnt; } return; } template<typename T> void tricnt_wrp(T nblock, spmat_t<T> *m, uint64_t *ocnt_d, unsigned int *bmap_d, size_t bmld, cudaStream_t stream) { dim3 block(32, THREADS / 32); tricnt_wrp_ps_k<32, THREADS / 32, WP_LEN_TH1, WP_LEN_TH2> <<<nblock, block, 0, stream>>>(m->nrows, m->rows_d, m->roff_d, m->cols_d, ocnt_d, bmap_d, bmld); cudaCheckError(); return; } ////////////////////////////////////////////////////////////////////////////////////////// template<int BDIM, int LOCLEN, typename ROW_T, typename OFF_T, typename CNT_T> __global__ void tricnt_thr_k(const ROW_T ner, const ROW_T *__restrict__ rows, const OFF_T *__restrict__ roff, const ROW_T *__restrict__ cols, CNT_T *__restrict__ ocnt) { CNT_T __cnt = 0; const ROW_T tid = blockIdx.x * BDIM + threadIdx.x; for (ROW_T rid = tid; rid < ner; rid += gridDim.x * BDIM) { const ROW_T r = rows[rid]; const OFF_T rbeg = roff[r]; const OFF_T rend = roff[r + 1]; const ROW_T rlen = rend - rbeg; if (!rlen) continue; if (rlen <= LOCLEN) { int nloc = 0; ROW_T loc[LOCLEN]; #pragma unroll for (nloc = 0; nloc < LOCLEN; nloc++) { if (rbeg + nloc >= rend) break; loc[nloc] = LDG(cols + rbeg + nloc); } #pragma unroll for (int i = 1; i < LOCLEN; i++) { if (i == nloc) break; const ROW_T c = loc[i]; const OFF_T soff = roff[c]; const OFF_T eoff = roff[c + 1]; for (OFF_T k = eoff - 1; k >= soff; k--) { const ROW_T cc = LDG(cols + k); if (cc < loc[0]) break; for (int j = i - 1; j >= 0; j--) { 
if (cc == loc[j]) __cnt++; } } } } else { const ROW_T minc = cols[rbeg]; for (int i = 1; i < rlen; i++) { const ROW_T c = LDG(cols + rbeg + i); const OFF_T soff = roff[c]; const OFF_T eoff = roff[c + 1]; for (OFF_T k = eoff - 1; k >= soff; k--) { const ROW_T cc = LDG(cols + k); if (cc < minc) break; for (int j = i - 1; j >= 0; j--) { if (cc == LDG(cols + rbeg + j)) __cnt++; } } } } } __syncthreads(); __cnt = block_sum<BDIM, 1, 32>(__cnt); if (threadIdx.x == 0) ocnt[blockIdx.x] = __cnt; return; } template<typename T> void tricnt_thr(T nblock, spmat_t<T> *m, uint64_t *ocnt_d, cudaStream_t stream) { cudaFuncSetCacheConfig(tricnt_thr_k<THREADS, TH_CENT_K_LOCLEN, typename type_utils<T>::LOCINT, typename type_utils<T>::LOCINT, uint64_t>, cudaFuncCachePreferL1); tricnt_thr_k<THREADS, TH_CENT_K_LOCLEN> <<<nblock, THREADS, 0, stream>>>(m->nrows, m->rows_d, m->roff_d, m->cols_d, ocnt_d); cudaCheckError() ; return; } ///////////////////////////////////////////////////////////////// __global__ void myset(unsigned long long *p, unsigned long long v, long long n) { const long long tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { p[tid] = v; } return; } void myCudaMemset(unsigned long long *p, unsigned long long v, long long n, cudaStream_t stream) { if (n <= 0) return; myset<<<DIV_UP(n, THREADS), THREADS, 0, stream>>>(p, v, n); cudaCheckError(); } template<typename IndexType> struct NonEmptyRow { const IndexType* p_roff; __host__ __device__ NonEmptyRow(const IndexType* roff) : p_roff(roff) { } __host__ __device__ __forceinline__ bool operator()(const IndexType &a) const { return (p_roff[a] < p_roff[a + 1]); } }; template<typename T> void create_nondangling_vector(const T* roff, T *p_nonempty, T *n_nonempty, size_t n, cudaStream_t stream) { if (n <= 0) return; thrust::counting_iterator<T> it(0); NonEmptyRow<T> temp_func(roff); T* d_out_num = (T*) tmp_get(sizeof(*n_nonempty), stream); cubIf(it, p_nonempty, d_out_num, n, temp_func, stream); cudaMemcpy(n_nonempty, d_out_num, sizeof(*n_nonempty), cudaMemcpyDeviceToHost); cudaCheckError(); tmp_release(d_out_num, stream); cudaCheckError(); } template<typename T> uint64_t reduce(uint64_t *v_d, T n, cudaStream_t stream) { uint64_t n_h; uint64_t *n_d = (uint64_t *) tmp_get(sizeof(*n_d), stream); cubSum(v_d, n_d, n, stream); cudaCheckError(); cudaMemcpy(&n_h, n_d, sizeof(*n_d), cudaMemcpyDeviceToHost); cudaCheckError(); tmp_release(n_d, stream); return n_h; } // instantiate for int template void tricnt_bsh<int>(int nblock, spmat_t<int> *m, uint64_t *ocnt_d, size_t bmld, cudaStream_t stream); template void tricnt_wrp<int>(int nblock, spmat_t<int> *m, uint64_t *ocnt_d, unsigned int *bmap_d, size_t bmld, cudaStream_t stream); template void tricnt_thr<int>(int nblock, spmat_t<int> *m, uint64_t *ocnt_d, cudaStream_t stream); template void tricnt_b2b<int>(int nblock, spmat_t<int> *m, uint64_t *ocnt_d, unsigned int *bmapL0_d, size_t bmldL0, unsigned int *bmapL1_d, size_t bmldL1, cudaStream_t stream); template uint64_t reduce<int>(uint64_t *v_d, int n, cudaStream_t stream); template void create_nondangling_vector<int>(const int *roff, int *p_nonempty, int *n_nonempty, size_t n, cudaStream_t stream); } // end namespace triangle counting } // end namespace nvgraph
0
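All of the cub* wrappers above follow the same two-phase CUB idiom: a first call with a NULL scratch pointer only reports the required temporary-storage size, the buffer is then taken from the cnmem pool via tmp_get(), and the identical call is repeated to do the real work before tmp_release() returns the buffer. Below is a minimal self-contained sketch of that idiom using plain cudaMalloc in place of the pool; it is illustrative only and not part of the nvgraph sources.

// Minimal sketch of the two-phase temp-storage pattern wrapped by cubSum() and
// friends, with cudaMalloc standing in for the cnmem pool (illustrative only).
#include <cstdio>
#include <cstdint>
#include <cub/cub.cuh>

int main() {
  const int n = 8;
  uint64_t h_in[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint64_t *d_in = NULL, *d_out = NULL;
  cudaMalloc(&d_in, n * sizeof(uint64_t));
  cudaMalloc(&d_out, sizeof(uint64_t));
  cudaMemcpy(d_in, h_in, n * sizeof(uint64_t), cudaMemcpyHostToDevice);

  // Phase 1: NULL temp storage -> CUB only computes the scratch size it needs.
  void  *d_temp = NULL;
  size_t temp_bytes = 0;
  cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n);

  // The wrappers above take this buffer from the cnmem pool (tmp_get).
  cudaMalloc(&d_temp, temp_bytes);

  // Phase 2: identical call with a real buffer -> the reduction actually runs.
  cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n);

  uint64_t sum = 0;
  cudaMemcpy(&sum, d_out, sizeof(uint64_t), cudaMemcpyDeviceToHost);
  printf("sum = %llu\n", (unsigned long long)sum);   // expected: 36

  cudaFree(d_temp);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}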
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/csrmv_cub.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "nvgraph.h" #include "nvgraphP.h" #include "nvgraph_error.hxx" #include "csrmv_cub.h" #include "cub_semiring/cub.cuh" namespace nvgraph { template <typename I, typename V>template <typename SR> NVGRAPH_ERROR SemiringDispatch<I, V>::Dispatch( const V* d_values, const I* d_row_offsets, const I* d_column_indices, const V* d_vector_x, V* d_vector_y, V alpha, V beta, I num_rows, I num_cols, I num_nonzeros, cudaStream_t stream) { // std::static_assert(std::is_same<typename std::remove_cv<T>::type, int>::value, "current CUB implementation supports int only for indices"); size_t temp_buf_size = 0; cudaError_t err = cub_semiring::cub::DeviceSpmv::CsrMV<V, SR>( NULL, temp_buf_size, d_values, d_row_offsets, d_column_indices, d_vector_x, d_vector_y, alpha, beta, num_rows, num_cols, num_nonzeros, stream); CHECK_CUDA(err); Vector<char> tmp_buf(std::max(temp_buf_size, size_t(1)), stream); err = cub_semiring::cub::DeviceSpmv::CsrMV<V, SR>( tmp_buf.raw(), temp_buf_size, d_values, d_row_offsets, d_column_indices, d_vector_x, d_vector_y, alpha, beta, num_rows, num_cols, num_nonzeros, stream); CHECK_CUDA(err); return NVGRAPH_OK; }; // deconstructs graph, checks parameters and dispatches semiring implementation template <typename I, typename V> NVGRAPH_ERROR SemiringDispatch<I, V>::InitAndLaunch( const nvgraph::MultiValuedCsrGraph<I, V> &graph, const size_t weight_index, const void *p_alpha, const size_t x_index, const void *p_beta, const size_t y_index, const nvgraphSemiring_t SR, cudaStream_t stream ) { if (weight_index >= graph.get_num_edge_dim() || x_index >= graph.get_num_vertex_dim() || y_index >= graph.get_num_vertex_dim()) // base index is 0 return NVGRAPH_ERR_BAD_PARAMETERS; I n = static_cast<I>(graph.get_num_vertices()); I nnz = static_cast<I>(graph.get_num_edges()); const V* vals = graph.get_raw_edge_dim(weight_index); const V* x = graph.get_raw_vertex_dim( x_index); V* y = const_cast<V*>(graph.get_raw_vertex_dim(y_index)); V alpha = *(static_cast<const V*>(p_alpha)); V beta = *(static_cast<const V*>(p_beta)); const I* row_ptr = graph.get_raw_row_offsets(); const I* col_ind = graph.get_raw_column_indices(); NVGRAPH_ERROR err = NVGRAPH_ERR_BAD_PARAMETERS; switch (SR) { case NVGRAPH_PLUS_TIMES_SR: err = Dispatch< cub_semiring::cub::PlusTimesSemiring<V> >(vals, row_ptr, col_ind, x, y, alpha, beta, n, n, nnz, stream); break; case NVGRAPH_MIN_PLUS_SR: err = Dispatch< cub_semiring::cub::MinPlusSemiring<V> >(vals, row_ptr, col_ind, x, y, alpha, beta, n, n, nnz, stream); break; case NVGRAPH_MAX_MIN_SR: err = Dispatch< cub_semiring::cub::MaxMinSemiring<V> >(vals, row_ptr, col_ind, x, y, alpha, beta, n, n, nnz, stream); break; case NVGRAPH_OR_AND_SR: err = Dispatch< cub_semiring::cub::OrAndBoolSemiring<V> >(vals, row_ptr, col_ind, x, y, alpha, beta, n, n, nnz, stream); break; default: break; } return err; }; // API wrapper to avoid bloating main API object nvgraph.cpp NVGRAPH_ERROR SemiringAPILauncher(nvgraphHandle_t 
handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t x, const void *beta, const size_t y, const nvgraphSemiring_t sr) { typedef int I; if (descrG->graphStatus!=HAS_VALUES) // need a MultiValuedCsrGraph return NVGRAPH_ERR_BAD_PARAMETERS; if (descrG->TT != NVGRAPH_CSR_32) // supported topologies return NVGRAPH_ERR_BAD_PARAMETERS; cudaStream_t stream = handle->stream; NVGRAPH_ERROR err = NVGRAPH_ERR_NOT_IMPLEMENTED; switch(descrG->T) { case CUDA_R_32F : { const nvgraph::MultiValuedCsrGraph<I, float> *mcsrg = static_cast<const nvgraph::MultiValuedCsrGraph<I, float>*> (descrG->graph_handle); err = SemiringDispatch<I, float>::InitAndLaunch( *mcsrg, weight_index, static_cast<const float*>(alpha), x, static_cast<const float*>(beta), y, sr, stream); break; } case CUDA_R_64F : { const nvgraph::MultiValuedCsrGraph<I, double> *mcsrg = static_cast<const nvgraph::MultiValuedCsrGraph<I, double>*> (descrG->graph_handle); err = SemiringDispatch<I, double>::InitAndLaunch( *mcsrg, weight_index, static_cast<const double*>(alpha), x, static_cast<const double*>(beta), y, sr, stream); break; } default: break; } return err; }; } //namespace nvgraph
0
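SemiringDispatch above only selects which cub_semiring operator pair CsrMV runs with; what that means numerically is easiest to see on the host. The sketch below is illustrative only (plain C++, toy CSR data, and it does not reproduce the cub_semiring device code): it evaluates one min-plus SpMV step, the NVGRAPH_MIN_PLUS_SR case, under the usual convention that the semiring "plus" is min, "times" is ordinary addition, and 0 / +infinity are the respective identities used here for alpha and beta.

// Host-side sketch of what y = alpha (x) A (x) x (+) beta (x) y means for the
// min-plus semiring ((+) = min, (x) = +). Illustrative only; toy data.
#include <algorithm>
#include <cstdio>
#include <limits>
#include <vector>

int main() {
  const double inf = std::numeric_limits<double>::infinity();

  // CSR of a small weighted digraph (made up): 0->1 (2), 0->2 (5), 1->3 (1),
  // 2->0 (3), 2->3 (2); vertex 3 has no outgoing edges.
  std::vector<int>    row_ptr = {0, 2, 3, 5, 5};
  std::vector<int>    col_ind = {1, 2, 3, 0, 3};
  std::vector<double> val     = {2.0, 5.0, 1.0, 3.0, 2.0};

  // x[j] = current best distance from vertex j to a target (here vertex 0).
  std::vector<double> x = {0.0, inf, inf, inf};
  std::vector<double> y(4, inf);

  // alpha = 0 and beta = inf are the multiplicative/additive identities of
  // min-plus, so this step is y_i = min_j ( w(i->j) + x_j ): one relaxation.
  const double alpha = 0.0, beta = inf;
  for (int i = 0; i < 4; ++i) {
    double acc = beta + y[i];                                // beta (x) y_i
    for (int k = row_ptr[i]; k < row_ptr[i + 1]; ++k)
      acc = std::min(acc, alpha + val[k] + x[col_ind[k]]);   // (+) = min, (x) = +
    y[i] = acc;
  }
  // After one step only vertex 2 reaches the target: y = inf inf 3 inf.
  printf("y = %g %g %g %g\n", y[0], y[1], y[2], y[3]);
  return 0;
}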
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/src/lanczos.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#ifdef NVGRAPH_PARTITION #define _USE_MATH_DEFINES #include <math.h> #include "lanczos.hxx" #include <stdio.h> #include <time.h> #include <cuda.h> #define USE_CURAND 1 #ifdef USE_CURAND #include <curand.h> #endif #include "nvgraph_error.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_vector_kernels.hxx" #include "nvgraph_cublas.hxx" #include "nvgraph_lapack.hxx" #include "debug_macros.h" // ========================================================= // Useful macros // ========================================================= // Get index of matrix entry #define IDX(i,j,lda) ((i)+(j)*(lda)) // ========================================================= // Macros and functions for cuRAND // ========================================================= //#ifdef USE_CURAND //namespace { // // /// Get message string from cuRAND status code // //static // //const char* curandGetErrorString(curandStatus_t e) { // // switch(e) { // // case CURAND_STATUS_SUCCESS: // // return "CURAND_STATUS_SUCCESS"; // // case CURAND_STATUS_VERSION_MISMATCH: // // return "CURAND_STATUS_VERSION_MISMATCH"; // // case CURAND_STATUS_NOT_INITIALIZED: // // return "CURAND_STATUS_NOT_INITIALIZED"; // // case CURAND_STATUS_ALLOCATION_FAILED: // // return "CURAND_STATUS_ALLOCATION_FAILED"; // // case CURAND_STATUS_TYPE_ERROR: // // return "CURAND_STATUS_TYPE_ERROR"; // // case CURAND_STATUS_OUT_OF_RANGE: // // return "CURAND_STATUS_OUT_OF_RANGE"; // // case CURAND_STATUS_LENGTH_NOT_MULTIPLE: // // return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; // // case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED: // // return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; // // case CURAND_STATUS_LAUNCH_FAILURE: // // return "CURAND_STATUS_LAUNCH_FAILURE"; // // case CURAND_STATUS_PREEXISTING_FAILURE: // // return "CURAND_STATUS_PREEXISTING_FAILURE"; // // case CURAND_STATUS_INITIALIZATION_FAILED: // // return "CURAND_STATUS_INITIALIZATION_FAILED"; // // case CURAND_STATUS_ARCH_MISMATCH: // // return "CURAND_STATUS_ARCH_MISMATCH"; // // case CURAND_STATUS_INTERNAL_ERROR: // // return "CURAND_STATUS_INTERNAL_ERROR"; // // default: // // return "unknown cuRAND error"; // // } // //} // // // curandGeneratorNormalX // inline static // curandStatus_t // curandGenerateNormalX(curandGenerator_t generator, // float * outputPtr, size_t n, // float mean, float stddev) { // return curandGenerateNormal(generator, outputPtr, n, mean, stddev); // } // inline static // curandStatus_t // curandGenerateNormalX(curandGenerator_t generator, // double * outputPtr, size_t n, // double mean, double stddev) { // return curandGenerateNormalDouble(generator, outputPtr, // n, mean, stddev); // } // //} //#endif namespace nvgraph { namespace { // ========================================================= // Helper functions // ========================================================= /// Perform Lanczos iteration /** Lanczos iteration is performed on a shifted matrix A+shift*I. 
* * @param A Matrix. * @param iter Pointer to current Lanczos iteration. On exit, the * variable is set equal to the final Lanczos iteration. * @param maxIter Maximum Lanczos iteration. This function will * perform a maximum of maxIter-*iter iterations. * @param shift Matrix shift. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm (i.e. entry in beta_host) is * less than tol. * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param alpha_host (Output, host memory, maxIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, maxIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Input/output, device memory, * n*(maxIter+1) entries) Lanczos vectors. Vectors are stored as * columns of a column-major matrix with dimensions * n x (maxIter+1). * @param work_dev (Output, device memory, maxIter entries) * Workspace. Not needed if full reorthogonalization is disabled. * @return Zero if successful. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> static int performLanczosIteration(const Matrix<IndexType_, ValueType_> * A, IndexType_ * iter, IndexType_ maxIter, ValueType_ shift, ValueType_ tol, bool reorthogonalize, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful variables const ValueType_ one = 1; const ValueType_ negOne = -1; const ValueType_ zero = 0; IndexType_ n = A->n; // ------------------------------------------------------- // Compute second Lanczos vector // ------------------------------------------------------- if(*iter<=0) { *iter = 1; // Apply matrix if(shift != 0) CHECK_CUDA(cudaMemcpyAsync(lanczosVecs_dev+n, lanczosVecs_dev, n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); A->mv(1, lanczosVecs_dev, shift, lanczosVecs_dev+n); // Orthogonalize Lanczos vector Cublas::dot(n, lanczosVecs_dev, 1, lanczosVecs_dev+IDX(0,1,n), 1, alpha_host); Cublas::axpy(n, -alpha_host[0], lanczosVecs_dev, 1, lanczosVecs_dev+IDX(0,1,n), 1); beta_host[0] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,1,n), 1); // Check if Lanczos has converged if(beta_host[0] <= tol) return 0; // Normalize Lanczos vector Cublas::scal(n, 1/beta_host[0], lanczosVecs_dev+IDX(0,1,n), 1); } // ------------------------------------------------------- // Compute remaining Lanczos vectors // ------------------------------------------------------- while(*iter<maxIter) { ++(*iter); // Apply matrix if(shift != 0) CHECK_CUDA(cudaMemcpyAsync(lanczosVecs_dev+(*iter)*n, lanczosVecs_dev+(*iter-1)*n, n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); A->mv(1, lanczosVecs_dev+IDX(0,*iter-1,n), shift, lanczosVecs_dev+IDX(0,*iter,n)); // Full reorthogonalization // "Twice is enough" algorithm per Kahan and Parlett if(reorthogonalize) { Cublas::gemv(true, n, *iter, &one, lanczosVecs_dev, n, lanczosVecs_dev+IDX(0,*iter,n), 1, &zero, work_dev, 1); Cublas::gemv(false, n, *iter, &negOne, lanczosVecs_dev, n, work_dev, 1, &one, lanczosVecs_dev+IDX(0,*iter,n), 1); CHECK_CUDA(cudaMemcpyAsync(alpha_host+(*iter-1), work_dev+(*iter-1), sizeof(ValueType_), cudaMemcpyDeviceToHost)); Cublas::gemv(true, n, *iter, &one, lanczosVecs_dev, n, lanczosVecs_dev+IDX(0,*iter,n), 1, &zero, work_dev, 1); Cublas::gemv(false, n, *iter, &negOne, lanczosVecs_dev, n, 
work_dev, 1, &one, lanczosVecs_dev+IDX(0,*iter,n), 1); } // Orthogonalization with 3-term recurrence relation else { Cublas::dot(n, lanczosVecs_dev+IDX(0,*iter-1,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1, alpha_host+(*iter-1)); Cublas::axpy(n, -alpha_host[*iter-1], lanczosVecs_dev+IDX(0,*iter-1,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1); Cublas::axpy(n, -beta_host[*iter-2], lanczosVecs_dev+IDX(0,*iter-2,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1); } // Compute residual beta_host[*iter-1] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,*iter,n), 1); // Check if Lanczos has converged if(beta_host[*iter-1] <= tol) break; // Normalize Lanczos vector Cublas::scal(n, 1/beta_host[*iter-1], lanczosVecs_dev+IDX(0,*iter,n), 1); } CHECK_CUDA(cudaDeviceSynchronize()); return 0; } /// Find Householder transform for 3-dimensional system /** Given an input vector v=[x,y,z]', this function finds a * Householder transform P such that P*v is a multiple of * e_1=[1,0,0]'. The input vector v is overwritten with the * Householder vector such that P=I-2*v*v'. * * @param v (Input/output, host memory, 3 entries) Input * 3-dimensional vector. On exit, the vector is set to the * Householder vector. * @param Pv (Output, host memory, 1 entry) First entry of P*v * (here v is the input vector). Either equal to ||v||_2 or * -||v||_2. * @param P (Output, host memory, 9 entries) Householder transform * matrix. Matrix dimensions are 3 x 3. */ template <typename IndexType_, typename ValueType_> static void findHouseholder3(ValueType_ * v, ValueType_ * Pv, ValueType_ * P) { // Compute norm of vector *Pv = std::sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]); // Choose whether to reflect to e_1 or -e_1 // This choice avoids catastrophic cancellation if(v[0] >= 0) *Pv = -(*Pv); v[0] -= *Pv; // Normalize Householder vector ValueType_ normHouseholder = std::sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]); if(normHouseholder != 0) { v[0] /= normHouseholder; v[1] /= normHouseholder; v[2] /= normHouseholder; } else { v[0] = 0; v[1] = 0; v[2] = 0; } // Construct Householder matrix IndexType_ i, j; for(j=0; j<3; ++j) for(i=0; i<3; ++i) P[IDX(i,j,3)] = -2*v[i]*v[j]; for(i=0; i<3; ++i) P[IDX(i,i,3)] += 1; } /// Apply 3-dimensional Householder transform to 4 x 4 matrix /** The Householder transform is pre-applied to the top three rows * of the matrix and post-applied to the left three columns. The * 4 x 4 matrix is intended to contain the bulge that is produced * in the Francis QR algorithm. * * @param v (Input, host memory, 3 entries) Householder vector. * @param A (Input/output, host memory, 16 entries) 4 x 4 matrix. */ template <typename IndexType_, typename ValueType_> static void applyHouseholder3(const ValueType_ * v, ValueType_ * A) { // Loop indices IndexType_ i, j; // Dot product between Householder vector and matrix row/column ValueType_ vDotA; // Pre-apply Householder transform for(j=0; j<4; ++j) { vDotA = 0; for(i=0; i<3; ++i) vDotA += v[i]*A[IDX(i,j,4)]; for(i=0; i<3; ++i) A[IDX(i,j,4)] -= 2*v[i]*vDotA; } // Post-apply Householder transform for(i=0; i<4; ++i) { vDotA = 0; for(j=0; j<3; ++j) vDotA += A[IDX(i,j,4)]*v[j]; for(j=0; j<3; ++j) A[IDX(i,j,4)] -= 2*vDotA*v[j]; } } /// Perform one step of Francis QR algorithm /** Equivalent to two steps of the classical QR algorithm on a * tridiagonal matrix. * * @param n Matrix dimension. * @param shift1 QR algorithm shift. * @param shift2 QR algorithm shift. * @param alpha (Input/output, host memory, n entries) Diagonal * entries of tridiagonal matrix. 
* @param beta (Input/output, host memory, n-1 entries) * Off-diagonal entries of tridiagonal matrix. * @param V (Input/output, host memory, n*n entries) Orthonormal * transforms from previous steps of QR algorithm. Matrix * dimensions are n x n. On exit, the orthonormal transform from * this Francis QR step is post-applied to the matrix. * @param work (Output, host memory, 3*n entries) Workspace. * @return Zero if successful. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> static int francisQRIteration(IndexType_ n, ValueType_ shift1, ValueType_ shift2, ValueType_ * alpha, ValueType_ * beta, ValueType_ * V, ValueType_ * work) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Temporary storage of 4x4 bulge and Householder vector ValueType_ bulge[16]; // Householder vector ValueType_ householder[3]; // Householder matrix ValueType_ householderMatrix[3*3]; // Shifts are roots of the polynomial p(x)=x^2+b*x+c ValueType_ b = -shift1 - shift2; ValueType_ c = shift1*shift2; // Loop indices IndexType_ i, j, pos; // Temporary variable ValueType_ temp; // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Compute initial Householder transform householder[0] = alpha[0]*alpha[0] + beta[0]*beta[0] + b*alpha[0] + c; householder[1] = beta[0]*(alpha[0]+alpha[1]+b); householder[2] = beta[0]*beta[1]; findHouseholder3<IndexType_,ValueType_>(householder, &temp, householderMatrix); // Apply initial Householder transform to create bulge memset(bulge, 0, 16*sizeof(ValueType_)); for(i=0; i<4; ++i) bulge[IDX(i,i,4)] = alpha[i]; for(i=0; i<3; ++i) { bulge[IDX(i+1,i,4)] = beta[i]; bulge[IDX(i,i+1,4)] = beta[i]; } applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 3, 3, 1, V, n, householderMatrix, 3, 0, work, n); memcpy(V, work, 3*n*sizeof(ValueType_)); // Chase bulge to bottom-right of matrix with Householder transforms for(pos=0; pos<n-4; ++pos) { // Move to next position alpha[pos] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = bulge[IDX(3,0,4)]; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; bulge[IDX(3,0,4)] = 0; bulge[IDX(3,1,4)] = 0; bulge[IDX(3,2,4)] = beta[pos+3]; bulge[IDX(0,3,4)] = 0; bulge[IDX(1,3,4)] = 0; bulge[IDX(2,3,4)] = beta[pos+3]; bulge[IDX(3,3,4)] = alpha[pos+4]; // Apply Householder transform findHouseholder3<IndexType_,ValueType_>(householder, beta+pos, householderMatrix); applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 3, 3, 1, V+IDX(0,pos+1,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,pos+1,n), work, 3*n*sizeof(ValueType_)); } // Apply penultimate Householder transform // Values in the last row and column are zero alpha[n-4] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = bulge[IDX(3,0,4)]; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; bulge[IDX(3,0,4)] = 0; bulge[IDX(3,1,4)] = 0; bulge[IDX(3,2,4)] = 0; bulge[IDX(0,3,4)] = 0; bulge[IDX(1,3,4)] = 0; bulge[IDX(2,3,4)] = 0; bulge[IDX(3,3,4)] = 0; findHouseholder3<IndexType_,ValueType_>(householder, beta+n-4, householderMatrix); applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 
3, 3, 1, V+IDX(0,n-3,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,n-3,n), work, 3*n*sizeof(ValueType_)); // Apply final Householder transform // Values in the last two rows and columns are zero alpha[n-3] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = 0; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; findHouseholder3<IndexType_,ValueType_>(householder, beta+n-3, householderMatrix); applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 2, 2, 1, V+IDX(0,n-2,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,n-2,n), work, 2*n*sizeof(ValueType_)); // Bulge has been eliminated alpha[n-2] = bulge[IDX(0,0,4)]; alpha[n-1] = bulge[IDX(1,1,4)]; beta[n-2] = bulge[IDX(1,0,4)]; return 0; } /// Perform implicit restart of Lanczos algorithm /** Shifts are Chebyshev nodes of unwanted region of matrix spectrum. * * @param n Matrix dimension. * @param iter Current Lanczos iteration. * @param iter_new Lanczos iteration after restart. * @param shiftUpper Pointer to upper bound for unwanted * region. Value is ignored if less than *shiftLower. If a * stronger upper bound has been found, the value is updated on * exit. * @param shiftLower Pointer to lower bound for unwanted * region. Value is ignored if greater than *shiftUpper. If a * stronger lower bound has been found, the value is updated on * exit. * @param alpha_host (Input/output, host memory, iter entries) * Diagonal entries of Lanczos system. * @param beta_host (Input/output, host memory, iter entries) * Off-diagonal entries of Lanczos system. * @param V_host (Output, host memory, iter*iter entries) * Orthonormal transform used to obtain restarted system. Matrix * dimensions are iter x iter. * @param work_host (Output, host memory, 4*iter entries) * Workspace. * @param lanczosVecs_dev (Input/output, device memory, n*(iter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (iter+1). * @param work_dev (Output, device memory, (n+iter)*iter entries) * Workspace. 
*/ template <typename IndexType_, typename ValueType_> static int lanczosRestart(IndexType_ n, IndexType_ iter, IndexType_ iter_new, ValueType_ * shiftUpper, ValueType_ * shiftLower, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ V_host, ValueType_ * __restrict__ work_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, bool smallest_eig) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ zero = 0; const ValueType_ one = 1; // Loop index IndexType_ i; // Number of implicit restart steps // Assumed to be even since each call to Francis algorithm is // equivalent to two calls of QR algorithm IndexType_ restartSteps = iter - iter_new; // Ritz values from Lanczos method ValueType_ * ritzVals_host = work_host + 3*iter; // Shifts for implicit restart ValueType_ * shifts_host; // Orthonormal matrix for similarity transform ValueType_ * V_dev = work_dev + n*iter; // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Compute Ritz values memcpy(ritzVals_host, alpha_host, iter*sizeof(ValueType_)); memcpy(work_host, beta_host, (iter-1)*sizeof(ValueType_)); Lapack<ValueType_>::sterf(iter, ritzVals_host, work_host); // Debug: Print largest eigenvalues //for (int i = iter-iter_new; i < iter; ++i) // std::cout <<*(ritzVals_host+i)<< " "; //std::cout <<std::endl; // Initialize similarity transform with identity matrix memset(V_host, 0, iter*iter*sizeof(ValueType_)); for(i=0; i<iter; ++i) V_host[IDX(i,i,iter)] = 1; // Determine interval to suppress eigenvalues if (smallest_eig) { if(*shiftLower > *shiftUpper) { *shiftUpper = ritzVals_host[iter-1]; *shiftLower = ritzVals_host[iter_new]; } else { *shiftUpper = max(*shiftUpper, ritzVals_host[iter-1]); *shiftLower = min(*shiftLower, ritzVals_host[iter_new]); } } else { if(*shiftLower > *shiftUpper) { *shiftUpper = ritzVals_host[iter-iter_new-1]; *shiftLower = ritzVals_host[0]; } else { *shiftUpper = max(*shiftUpper, ritzVals_host[iter-iter_new-1]); *shiftLower = min(*shiftLower, ritzVals_host[0]); } } // Calculate Chebyshev nodes as shifts shifts_host = ritzVals_host; for(i=0; i<restartSteps; ++i) { shifts_host[i] = cos((i+0.5)*static_cast<ValueType_>(M_PI)/restartSteps); shifts_host[i] *= 0.5*((*shiftUpper)-(*shiftLower)); shifts_host[i] += 0.5*((*shiftUpper)+(*shiftLower)); } // Apply Francis QR algorithm to implicitly restart Lanczos for(i=0; i<restartSteps; i+=2) if(francisQRIteration(iter, shifts_host[i], shifts_host[i+1], alpha_host, beta_host, V_host, work_host)) WARNING("error in implicitly shifted QR algorithm"); // Obtain new residual CHECK_CUDA(cudaMemcpyAsync(V_dev, V_host, iter*iter*sizeof(ValueType_), cudaMemcpyHostToDevice)); beta_host[iter-1] = beta_host[iter-1]*V_host[IDX(iter-1,iter_new-1,iter)]; Cublas::gemv(false, n, iter, beta_host+iter_new-1, lanczosVecs_dev, n, V_dev+IDX(0,iter_new,iter), 1, beta_host+iter-1, lanczosVecs_dev+IDX(0,iter,n), 1); // Obtain new Lanczos vectors Cublas::gemm(false, false, n, iter_new, iter, &one, lanczosVecs_dev, n, V_dev, iter, &zero, work_dev, n); CHECK_CUDA(cudaMemcpyAsync(lanczosVecs_dev, work_dev, n*iter_new*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); // Normalize residual to obtain new Lanczos vector CHECK_CUDA(cudaMemcpyAsync(lanczosVecs_dev+IDX(0,iter_new,n), lanczosVecs_dev+IDX(0,iter,n), 
n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); beta_host[iter_new-1] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,iter_new,n), 1); Cublas::scal(n, 1/beta_host[iter_new-1], lanczosVecs_dev+IDX(0,iter_new,n), 1); return 0; } } // ========================================================= // Eigensolver // ========================================================= /// Compute smallest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are smallest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the smallest unwanted eigenvalue * (i.e. the (nEigVecs+1)th smallest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param effIter On exit, pointer to final size of Lanczos system. * @param totalIter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param shift On exit, pointer to matrix shift (estimate for * largest eigenvalue). * @param alpha_host (Output, host memory, restartIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, restartIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Output, device memory, n*(restartIter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (restartIter+1). * @param work_dev (Output, device memory, * (n+restartIter)*restartIter entries) Workspace. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Largest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to smallest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeSmallestEigenvectors(const Matrix<IndexType_,ValueType_> * A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ * effIter, IndexType_ * totalIter, ValueType_ * shift, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ one = 1; const ValueType_ zero = 0; // Matrix dimension IndexType_ n = A->n; // Shift for implicit restart ValueType_ shiftUpper; ValueType_ shiftLower; // Lanczos iteration counters IndexType_ maxIter_curr = restartIter; // Maximum size of Lanczos system // Status flags int status; // Loop index IndexType_ i; // Host memory ValueType_ * Z_host; // Eigenvectors in Lanczos basis ValueType_ * work_host; // Workspace // ------------------------------------------------------- // Check that LAPACK is enabled // ------------------------------------------------------- //Lapack<ValueType_>::check_lapack_enabled(); // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(A->m != A->n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable initialization // ------------------------------------------------------- // Total number of Lanczos iterations *totalIter = 0; // Allocate host memory Z_host = (ValueType_*) malloc(restartIter*restartIter *sizeof(ValueType_)); if(Z_host==NULL) WARNING("could not allocate host memory"); work_host = (ValueType_*) malloc(4*restartIter*sizeof(ValueType_)); if(work_host==NULL) WARNING("could not allocate host memory"); // Initialize cuBLAS Cublas::set_pointer_mode_host(); // ------------------------------------------------------- // Compute largest eigenvalue to determine shift // ------------------------------------------------------- #ifdef USE_CURAND // Random number generator curandGenerator_t randGen; // Initialize random number generator CHECK_CURAND(curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(randGen, 123456/*time(NULL)*/)); // Initialize initial Lanczos vector CHECK_CURAND(curandGenerateNormalX(randGen, lanczosVecs_dev, n+n%2, zero, one)); ValueType_ normQ1 = Cublas::nrm2(n, lanczosVecs_dev, 1); Cublas::scal(n, 1/normQ1, lanczosVecs_dev, 1); #else fill_raw_vec (lanczosVecs_dev, n, (ValueType_)1.0/n); // doesn't work #endif // Estimate number of Lanczos iterations // 
See bounds in Kuczynski and Wozniakowski (1992). //const ValueType_ relError = 0.25; // Relative error //const ValueType_ failProb = 1e-4; // Probability of failure //maxIter_curr = log(n/pow(failProb,2))/(4*std::sqrt(relError)) + 1; //maxIter_curr = min(maxIter_curr, restartIter); // Obtain tridiagonal matrix with Lanczos *effIter = 0; *shift = 0; status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0.0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); // Determine largest eigenvalue Lapack<ValueType_>::sterf(*effIter, alpha_host, beta_host); *shift = -alpha_host[*effIter-1]; //std::cout << *shift <<std::endl; // ------------------------------------------------------- // Compute eigenvectors of shifted matrix // ------------------------------------------------------- // Obtain tridiagonal matrix with Lanczos *effIter = 0; //maxIter_curr = min(maxIter, restartIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter; // Apply Lanczos method until convergence shiftLower = 1; shiftUpper = -1; while(*totalIter<maxIter && beta_host[*effIter-1]>tol*shiftLower) { // Determine number of restart steps // Number of steps must be even due to Francis algorithm IndexType_ iter_new = nEigVecs+1; if(restartIter-(maxIter-*totalIter) > nEigVecs+1) iter_new = restartIter-(maxIter-*totalIter); if((restartIter-iter_new) % 2) iter_new -= 1; if(iter_new==*effIter) break; // Implicit restart of Lanczos method status = lanczosRestart<IndexType_, ValueType_> (n, *effIter, iter_new, &shiftUpper, &shiftLower, alpha_host, beta_host, Z_host, work_host, lanczosVecs_dev, work_dev, true); if(status) WARNING("error in Lanczos implicit restart"); *effIter = iter_new; // Check for convergence if(beta_host[*effIter-1] <= tol*fabs(shiftLower)) break; // Proceed with Lanczos method //maxIter_curr = min(restartIter, maxIter-*totalIter+*effIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, tol*fabs(shiftLower), reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter-iter_new; } // Warning if Lanczos has failed to converge if(beta_host[*effIter-1] > tol*fabs(shiftLower)) { WARNING("implicitly restarted Lanczos failed to converge"); } // Solve tridiagonal system memcpy(work_host+2*(*effIter), alpha_host, (*effIter)*sizeof(ValueType_)); memcpy(work_host+3*(*effIter), beta_host, (*effIter-1)*sizeof(ValueType_)); Lapack<ValueType_>::steqr('I', *effIter, work_host+2*(*effIter), work_host+3*(*effIter), Z_host, *effIter, work_host); // Obtain desired eigenvalues by applying shift for(i=0; i<*effIter; ++i) work_host[i+2*(*effIter)] -= *shift; for(i=*effIter; i<nEigVecs; ++i) work_host[i+2*(*effIter)] = 0; // Copy results to device memory CHECK_CUDA(cudaMemcpy(eigVals_dev, work_host+2*(*effIter), nEigVecs*sizeof(ValueType_), cudaMemcpyHostToDevice)); //for (int i = 0; i < nEigVecs; ++i) //{ // std::cout <<*(work_host+(2*(*effIter)+i))<< std::endl; //} CHECK_CUDA(cudaMemcpy(work_dev, Z_host, (*effIter)*nEigVecs*sizeof(ValueType_), cudaMemcpyHostToDevice)); // Convert eigenvectors from Lanczos basis to standard basis Cublas::gemm(false, false, n, nEigVecs, *effIter, &one, lanczosVecs_dev, n, work_dev, *effIter, &zero, 
eigVecs_dev, n); // Clean up and exit free(Z_host); free(work_host); #ifdef USE_CURAND CHECK_CURAND(curandDestroyGenerator(randGen)); #endif return NVGRAPH_OK; } /// Compute smallest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are smallest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * CNMEM must be initialized before calling this function. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the smallest unwanted eigenvalue * (i.e. the (nEigVecs+1)th smallest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param iter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Smallest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to smallest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeSmallestEigenvectors(const Matrix<IndexType_,ValueType_> & A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ & iter, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Matrix dimension IndexType_ n = A.n; // Check that parameters are valid if(A.m != A.n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Allocate memory ValueType_ * alpha_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); ValueType_ * beta_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); Vector<ValueType_> lanczosVecs_dev(n*(restartIter+1), stream); Vector<ValueType_> work_dev((n+restartIter)*restartIter, stream); // Perform Lanczos method IndexType_ effIter; ValueType_ shift; NVGRAPH_ERROR status = computeSmallestEigenvectors(&A, nEigVecs, maxIter, restartIter, tol, reorthogonalize, &effIter, &iter, &shift, alpha_host, beta_host, lanczosVecs_dev.raw(), work_dev.raw(), 
eigVals_dev, eigVecs_dev); // Clean up and return free(alpha_host); free(beta_host); return status; } // ========================================================= // Eigensolver // ========================================================= /// Compute largest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are largest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the largest unwanted eigenvalue * (i.e. the (nEigVecs+1)th largest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param effIter On exit, pointer to final size of Lanczos system. * @param totalIter On exit, pointer to total number of Lanczos * iterations performed. * @param alpha_host (Output, host memory, restartIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, restartIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Output, device memory, n*(restartIter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (restartIter+1). * @param work_dev (Output, device memory, * (n+restartIter)*restartIter entries) Workspace. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Largest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to largest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeLargestEigenvectors(const Matrix<IndexType_,ValueType_> * A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ * effIter, IndexType_ * totalIter, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ one = 1; const ValueType_ zero = 0; // Matrix dimension IndexType_ n = A->n; // Lanczos iteration counters IndexType_ maxIter_curr = restartIter; // Maximum size of Lanczos system // Status flags int status; // Loop index IndexType_ i; // Host memory ValueType_ * Z_host; // Eigenvectors in Lanczos basis ValueType_ * work_host; // Workspace // ------------------------------------------------------- // Check that LAPACK is enabled // ------------------------------------------------------- //Lapack<ValueType_>::check_lapack_enabled(); // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(A->m != A->n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter <= nEigVecs) { WARNING("invalid parameters (restartIter<=nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable initialization // ------------------------------------------------------- // Total number of Lanczos iterations *totalIter = 0; // Allocate host memory Z_host = (ValueType_*) malloc(restartIter*restartIter *sizeof(ValueType_)); if(Z_host==NULL) WARNING("could not allocate host memory"); work_host = (ValueType_*) malloc(4*restartIter*sizeof(ValueType_)); if(work_host==NULL) WARNING("could not allocate host memory"); // Initialize cuBLAS Cublas::set_pointer_mode_host(); // ------------------------------------------------------- // Compute largest eigenvalue // ------------------------------------------------------- #ifdef USE_CURAND // Random number generator curandGenerator_t randGen; // Initialize random number generator CHECK_CURAND(curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(randGen, 123456)); // Initialize initial Lanczos vector CHECK_CURAND(curandGenerateNormalX(randGen, lanczosVecs_dev, n+n%2, zero, one)); ValueType_ normQ1 = Cublas::nrm2(n, lanczosVecs_dev, 1); Cublas::scal(n, 1/normQ1, lanczosVecs_dev, 1); #else fill_raw_vec (lanczosVecs_dev, n, (ValueType_)1.0/n); // doesn't work #endif // Estimate number of Lanczos iterations // See bounds in Kuczynski and Wozniakowski (1992). 
//const ValueType_ relError = 0.25; // Relative error //const ValueType_ failProb = 1e-4; // Probability of failure //maxIter_curr = log(n/pow(failProb,2))/(4*std::sqrt(relError)) + 1; //maxIter_curr = min(maxIter_curr, restartIter); // Obtain tridiagonal matrix with Lanczos *effIter = 0; ValueType_ shift_val=0.0; ValueType_ *shift = &shift_val; //maxIter_curr = min(maxIter, restartIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter; // Apply Lanczos method until convergence ValueType_ shiftLower = 1; ValueType_ shiftUpper = -1; while(*totalIter<maxIter && beta_host[*effIter-1]>tol*shiftLower) { // Determine number of restart steps // Number of steps must be even due to Francis algorithm IndexType_ iter_new = nEigVecs+1; if(restartIter-(maxIter-*totalIter) > nEigVecs+1) iter_new = restartIter-(maxIter-*totalIter); if((restartIter-iter_new) % 2) iter_new -= 1; if(iter_new==*effIter) break; // Implicit restart of Lanczos method status = lanczosRestart<IndexType_, ValueType_> (n, *effIter, iter_new, &shiftUpper, &shiftLower, alpha_host, beta_host, Z_host, work_host, lanczosVecs_dev, work_dev, false); if(status) WARNING("error in Lanczos implicit restart"); *effIter = iter_new; // Check for convergence if(beta_host[*effIter-1] <= tol*fabs(shiftLower)) break; // Proceed with Lanczos method //maxIter_curr = min(restartIter, maxIter-*totalIter+*effIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, tol*fabs(shiftLower), reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter-iter_new; } // Warning if Lanczos has failed to converge if(beta_host[*effIter-1] > tol*fabs(shiftLower)) { WARNING("implicitly restarted Lanczos failed to converge"); } for (int i = 0; i < restartIter; ++i) { for (int j = 0; j < restartIter; ++j) Z_host[i*restartIter+j] = 0; } // Solve tridiagonal system memcpy(work_host+2*(*effIter), alpha_host, (*effIter)*sizeof(ValueType_)); memcpy(work_host+3*(*effIter), beta_host, (*effIter-1)*sizeof(ValueType_)); Lapack<ValueType_>::steqr('I', *effIter, work_host+2*(*effIter), work_host+3*(*effIter), Z_host, *effIter, work_host); // note: We need to pick the top nEigVecs eigenvalues // but effItter can be larger than nEigVecs // hence we add an offset for that case, because we want to access top nEigVecs eigenpairs in the matrix of size effIter. 
// remember the array is sorted, so it is not needed for smallest eigenvalues case because the first ones are the smallest ones IndexType_ top_eigenparis_idx_offset = *effIter - nEigVecs; //Debug : print nEigVecs largest eigenvalues //for (int i = top_eigenparis_idx_offset; i < *effIter; ++i) // std::cout <<*(work_host+(2*(*effIter)+i))<< " "; //std::cout <<std::endl; //Debug : print nEigVecs largest eigenvectors //for (int i = top_eigenparis_idx_offset; i < *effIter; ++i) //{ // for (int j = 0; j < *effIter; ++j) // std::cout <<Z_host[i*(*effIter)+j]<< " "; // std::cout <<std::endl; //} // Obtain desired eigenvalues by applying shift for(i=0; i<*effIter; ++i) work_host[i+2*(*effIter)] -= *shift; for(i=0; i<top_eigenparis_idx_offset; ++i) work_host[i+2*(*effIter)] = 0; // Copy results to device memory // skip smallest eigenvalue if needed CHECK_CUDA(cudaMemcpy(eigVals_dev, work_host+2*(*effIter)+top_eigenparis_idx_offset, nEigVecs*sizeof(ValueType_), cudaMemcpyHostToDevice)); // skip smallest eigenvector if needed CHECK_CUDA(cudaMemcpy(work_dev, Z_host+(top_eigenparis_idx_offset*(*effIter)), (*effIter)*nEigVecs*sizeof(ValueType_), cudaMemcpyHostToDevice)); // Convert eigenvectors from Lanczos basis to standard basis Cublas::gemm(false, false, n, nEigVecs, *effIter, &one, lanczosVecs_dev, n, work_dev, *effIter, &zero, eigVecs_dev, n); // Clean up and exit free(Z_host); free(work_host); #ifdef USE_CURAND CHECK_CURAND(curandDestroyGenerator(randGen)); #endif return NVGRAPH_OK; } /// Compute largest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are largest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * CNMEM must be initialized before calling this function. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the largest unwanted eigenvalue * (i.e. the (nEigVecs+1)th largest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param iter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Largest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to largest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeLargestEigenvectors(const Matrix<IndexType_,ValueType_> & A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ & iter, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Matrix dimension IndexType_ n = A.n; // Check that parameters are valid if(A.m != A.n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Allocate memory ValueType_ * alpha_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); ValueType_ * beta_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); Vector<ValueType_> lanczosVecs_dev(n*(restartIter+1), stream); Vector<ValueType_> work_dev((n+restartIter)*restartIter, stream); // Perform Lanczos method IndexType_ effIter; NVGRAPH_ERROR status = computeLargestEigenvectors(&A, nEigVecs, maxIter, restartIter, tol, reorthogonalize, &effIter, &iter, alpha_host, beta_host, lanczosVecs_dev.raw(), work_dev.raw(), eigVals_dev, eigVecs_dev); // Clean up and return free(alpha_host); free(beta_host); return status; } // ========================================================= // Explicit instantiation // ========================================================= template NVGRAPH_ERROR computeSmallestEigenvectors<int,float> (const Matrix<int,float> * A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int * iter, int * totalIter, float * shift, float * __restrict__ alpha_host, float * __restrict__ beta_host, float * __restrict__ lanczosVecs_dev, float * __restrict__ work_dev, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int,double> (const Matrix<int,double> * A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int * iter, int * totalIter, double * shift, double * __restrict__ alpha_host, double * __restrict__ beta_host, double * __restrict__ lanczosVecs_dev, double * __restrict__ work_dev, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int, float> (const Matrix<int,float> & A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int & iter, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int, double> (const Matrix<int,double> & A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int & iter, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int,float> (const Matrix<int,float> * A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int * iter, int * totalIter, 
float * __restrict__ alpha_host, float * __restrict__ beta_host, float * __restrict__ lanczosVecs_dev, float * __restrict__ work_dev, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int,double> (const Matrix<int,double> * A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int * iter, int * totalIter, double * __restrict__ alpha_host, double * __restrict__ beta_host, double * __restrict__ lanczosVecs_dev, double * __restrict__ work_dev, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int, float> (const Matrix<int,float> & A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int & iter, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int, double> (const Matrix<int,double> & A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int & iter, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); } //#endif //NVGRAPH_PARTITION
0
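The Lanczos solver above reduces its Francis QR bulge-chasing to 3-dimensional Householder reflections (findHouseholder3 / applyHouseholder3). Below is a minimal standalone sketch of that building block only: plain host C++ with none of the nvGraph types, an example vector chosen just for illustration, and the same sign convention the solver documents (reflect toward -e_1 when v[0] >= 0 to avoid cancellation). It is not the library code, just a check that P*v collapses onto a multiple of e_1.

// Standalone illustration of the 3-D Householder reflector described above:
// build P = I - 2*u*u' so that P*v is a multiple of e_1.
#include <cmath>
#include <cstdio>

int main() {
  double v[3] = {3.0, 4.0, 12.0};                       // example input, ||v|| = 13
  double norm_v = std::sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]);
  double alpha  = (v[0] >= 0) ? -norm_v : norm_v;       // first entry of P*v
  double u[3]   = {v[0] - alpha, v[1], v[2]};           // Householder vector
  double norm_u = std::sqrt(u[0]*u[0] + u[1]*u[1] + u[2]*u[2]);
  for (int i = 0; i < 3; ++i) u[i] /= norm_u;

  // Apply P = I - 2*u*u' to v; the last two entries should vanish.
  double uDotV = u[0]*v[0] + u[1]*v[1] + u[2]*v[2];
  double Pv[3];
  for (int i = 0; i < 3; ++i) Pv[i] = v[i] - 2.0*u[i]*uDotV;
  std::printf("P*v = [%.6f, %.6f, %.6f], expected [%.6f, 0, 0]\n",
              Pv[0], Pv[1], Pv[2], alpha);
  return 0;
}

For v = [3, 4, 12] this prints P*v = [-13, 0, 0], matching the documented behavior of findHouseholder3.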
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_mv_float_sum.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, float>* contract_graph_mv_float_sum(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(contract_from_aggregates_t<int, float, SemiRingFctrSelector<Sum, float>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
0
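The contraction wrappers in this directory (contract_graph_mv_float_sum above and the Min/Max/Multiply and double-precision variants below) all dispatch to contract_from_aggregates_t with a different semiring reduction functor. As a rough host-only illustration of what a Sum reduction over aggregates means, the following toy sketch collapses a small CSR graph onto per-vertex aggregate ids and sums the weights of edges that land on the same aggregate pair. The CSR arrays and aggregate labels are made up for the example; this is not the CUDA implementation behind contract_from_aggregates_t.

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

int main() {
  // Toy CSR graph: 4 vertices, 5 weighted edges.
  std::vector<int>    row_off = {0, 2, 3, 4, 5};
  std::vector<int>    col_ind = {1, 2, 2, 3, 0};
  std::vector<double> weights = {1.0, 2.0, 3.0, 4.0, 5.0};
  std::vector<int>    agg     = {0, 0, 1, 1};   // vertex -> aggregate id

  // Sum-reduce every fine edge into the edge between its aggregate endpoints.
  std::map<std::pair<int,int>, double> coarse;
  for (int u = 0; u < 4; ++u)
    for (int e = row_off[u]; e < row_off[u + 1]; ++e)
      coarse[std::make_pair(agg[u], agg[col_ind[e]])] += weights[e];

  for (const auto& kv : coarse)
    std::printf("coarse edge (%d -> %d) has weight %.1f\n",
                kv.first.first, kv.first.second, kv.second);
  return 0;
}

The remaining files in this directory follow the same pattern with Min, Max, and Multiply functors and with double-precision values; the VCombine/VReduce/ECombine/EReduce arguments select which semiring operations are applied to the vertex and edge data during contraction.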
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_mv_float_max.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, float>* contract_graph_mv_float_max(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(contract_from_aggregates_t<int, float, SemiRingFctrSelector<Max, float>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_mv_double_mul.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, double>* contract_graph_mv_double_mul(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(contract_from_aggregates_t<int, double, SemiRingFctrSelector<Multiply, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_csr_sum.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // CsrGraph<int>* contract_graph_csr_sum(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return contract_from_aggregates_t<int, double, SemiRingFctrSelector<Sum, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce)); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_csr_min.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // CsrGraph<int>* contract_graph_csr_min(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return contract_from_aggregates_t<int, double, SemiRingFctrSelector<Min, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce)); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_csr_mul.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // CsrGraph<int>* contract_graph_csr_mul(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return contract_from_aggregates_t<int, double, SemiRingFctrSelector<Multiply, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce)); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_mv_float_min.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, float>* contract_graph_mv_float_min(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(contract_from_aggregates_t<int, float, SemiRingFctrSelector<Min, float>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_mv_double_max.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, double>* contract_graph_mv_double_max(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(contract_from_aggregates_t<int, double, SemiRingFctrSelector<Max, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_mv_float_mul.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, float>* contract_graph_mv_float_mul(MultiValuedCsrGraph<int, float>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, float>*>(contract_from_aggregates_t<int, float, SemiRingFctrSelector<Multiply, float>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_mv_double_sum.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, double>* contract_graph_mv_double_sum(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(contract_from_aggregates_t<int, double, SemiRingFctrSelector<Sum, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_mv_double_min.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, double>* contract_graph_mv_double_min(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(contract_from_aggregates_t<int, double, SemiRingFctrSelector<Min, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
0
rapidsai_public_repos/nvgraph/cpp/src
rapidsai_public_repos/nvgraph/cpp/src/graph_contraction/contraction_csr_max.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // CsrGraph<int>* contract_graph_csr_max(CsrGraph<int>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return contract_from_aggregates_t<int, double, SemiRingFctrSelector<Max, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce)); } }
0
rapidsai_public_repos/nvgraph/conda-recipes
rapidsai_public_repos/nvgraph/conda-recipes/nvgraph/build.sh
#!/usr/bin/env bash

CMAKE_COMMON_VARIABLES=" -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX11_ABI=$CMAKE_CXX11_ABI"

if [ -n "$MACOSX_DEPLOYMENT_TARGET" ]; then
    # C++11 requires 10.9
    # but cudatoolkit 8 is built for 10.11
    export MACOSX_DEPLOYMENT_TARGET=10.11
fi

# show environment
printenv

# Cleanup local git
git clean -xdf

# Change directory for build process
cd cpp

# Use CMake-based build procedure
mkdir build
cd build

# configure
cmake $CMAKE_COMMON_VARIABLES ..

# build
make -j VERBOSE=1 install
0
rapidsai_public_repos/nvgraph/conda-recipes
rapidsai_public_repos/nvgraph/conda-recipes/nvgraph/meta.yaml
# Copyright (c) 2018, NVIDIA CORPORATION.

# Usage:
#   conda build -c defaults -c conda-forge .
{% set version = environ.get('GIT_DESCRIBE_TAG', '0.0.0.dev').lstrip('v') %}
{% set git_revision_count=environ.get('GIT_DESCRIBE_NUMBER', 0) %}
{% set cuda_version='.'.join(environ.get('CUDA_VERSION', 'unknown').split('.')[:2]) %}

package:
  name: nvgraph
  version: {{ version }}

source:
  path: ../..

build:
  number: {{ git_revision_count }}
  string: cuda{{ cuda_version }}_{{ git_revision_count }}

requirements:
  build:
    - cmake 3.12.4

about:
  home: http://nvidia.com/
  license: LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS
  license_file: LICENSE
  summary: nvgraph Library
0
rapidsai_public_repos/nvgraph/conda-recipes
rapidsai_public_repos/nvgraph/conda-recipes/nvgraph/LICENSE
LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS (July 26, 2018 version) This license agreement, including exhibits attached ("Agreement”) is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of a NVIDIA software development kit (“SDK”). Each SDK has its own set of software and materials, but here is a description of the types of items that may be included in a SDK: source code, header files, APIs, data sets and assets (examples include images, textures, models, scenes, videos, native API input/output files), binary software, sample code, libraries, utility programs, programming code and documentation. This Agreement can be accepted only by an adult of legal age of majority in the country in which the SDK is used. If you are entering into this Agreement on behalf of a company or other legal entity, you represent that you have the legal authority to bind the entity to this Agreement, in which case “you” will mean the entity you represent. If you don’t have the required age or authority to accept this Agreement, or if you don’t accept all the terms and conditions of this Agreement, do not download, install or use the SDK. You agree to use the SDK only for purposes that are permitted by (a) this Agreement, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. 1. License. 1.1 Grant Subject to the terms of this Agreement, NVIDIA hereby grants you a non-exclusive, non-transferable license, without the right to sublicense (except as expressly provided in this Agreement) to: (i) Install and use the SDK, (ii) Modify and create derivative works of sample source code delivered in the SDK, and (iii) Distribute those portions of the SDK that are identified in this Agreement as distributable, as incorporated in object code format into a software application that meets the distribution requirements indicated in this Agreement. 1.2 Distribution Requirements These are the distribution requirements for you to exercise the distribution grant: (i) Your application must have material additional functionality, beyond the included portions of the SDK. (ii) The distributable portions of the SDK shall only be accessed by your application. (iii) The following notice shall be included in modifications and derivative works of sample source code distributed: “This software contains source code provided by NVIDIA Corporation.” (iv) Unless a developer tool is identified in this Agreement as distributable, it is delivered for your internal use only. (v) The terms under which you distribute your application must be consistent with the terms of this Agreement, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. Additionally, you agree that you will protect the privacy, security and legal rights of your application users. (vi) You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SDK not in compliance with the requirements of this Agreement, and to enforce the terms of your agreements with respect to distributed SDK. 1.3 Authorized Users You may allow employees and contractors of your entity or of your subsidiary(ies) to access and use the SDK from your secure network to perform work on your behalf. If you are an academic institution you may allow users enrolled or employed by the academic institution to access and use the SDK from your secure network. 
You are responsible for the compliance with the terms of this Agreement by your authorized users. If you become aware that your authorized users didn’t follow the terms of this Agreement, you agree to take reasonable steps to resolve the non-compliance and prevent new occurrences. 1.4 Pre-Release SDK The SDK versions identified as alpha, beta, preview or otherwise as pre-release, may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, accessibility, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. Use of a pre-release SDK may result in unexpected results, loss of data, project delays or other unpredictable damage or loss. You may use a pre-release SDK at your own risk, understanding that pre-release SDKs are not intended for use in production or business-critical systems. NVIDIA may choose not to make available a commercial version of any pre-release SDK. NVIDIA may also choose to abandon development and terminate the availability of a pre-release SDK at any time without liability. 1.5 Updates NVIDIA may, at its option, make available patches, workarounds or other updates to this SDK. Unless the updates are provided with their separate governing terms, they are deemed part of the SDK licensed to you as provided in this Agreement. You agree that the form and content of the SDK that NVIDIA provides may change without prior notice to you. While NVIDIA generally maintains compatibility between versions, NVIDIA may in some cases make changes that introduce incompatibilities in future versions of the SDK. 1.6 Third Party Licenses The SDK may come bundled with, or otherwise include or be distributed with, third party software licensed by a NVIDIA supplier and/or open source software provided under an open source license. Use of third party software is subject to the third-party license terms, or in the absence of third party terms, the terms of this Agreement. Copyright to third party software is held by the copyright holders indicated in the third-party software or license. 1.7 Reservation of Rights NVIDIA reserves all rights, title and interest in and to the SDK not expressly granted to you under this Agreement. 2. Limitations. The following license limitations apply to your use of the SDK: 2.1 You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SDK or copies of the SDK. 2.2 Except as expressly provided in this Agreement, you may not copy, sell, rent, sublicense, transfer, distribute, modify, or create derivative works of any portion of the SDK. For clarity, you may not distribute or sublicense the SDK as a stand-alone product. 2.3 Unless you have an agreement with NVIDIA for this purpose, you may not indicate that an application created with the SDK is sponsored or endorsed by NVIDIA. 2.4 You may not bypass, disable, or circumvent any encryption, security, digital rights management or authentication mechanism in the SDK. 2.5 You may not use the SDK in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SDK be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. 
2.6 Unless you have an agreement with NVIDIA for this purpose, you may not use the SDK with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in nuclear, avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SDK for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses.

2.7 You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to your use of the SDK outside of the scope of this Agreement, or not in compliance with its terms.

3. Ownership.

3.1 NVIDIA or its licensors hold all rights, title and interest in and to the SDK and its modifications and derivative works, including their respective intellectual property rights, subject to your rights under Section 3.2. This SDK may include software and materials from NVIDIA’s licensors, and these licensors are intended third party beneficiaries that may enforce this Agreement with respect to their intellectual property rights.

3.2 You hold all rights, title and interest in and to your applications and your derivative works of the sample source code delivered in the SDK, including their respective intellectual property rights, subject to NVIDIA’s rights under section 3.1.

3.3 You may, but don’t have to, provide to NVIDIA suggestions, feature requests or other feedback regarding the SDK, including possible enhancements or modifications to the SDK. For any feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) it without the payment of any royalties or fees to you. NVIDIA will use feedback at its choice. NVIDIA is constantly looking for ways to improve its products, so you may send feedback to NVIDIA through the developer portal at https://developer.nvidia.com.

4. No Warranties.

THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF DEALING OR COURSE OF TRADE.

5. Limitations of Liability.
TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES’ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT.

These exclusions and limitations of liability shall apply regardless of whether NVIDIA or its affiliates have been advised of the possibility of such damages, and regardless of whether a remedy fails its essential purpose. These exclusions and limitations of liability form an essential basis of the bargain between the parties, and, absent any of these exclusions or limitations of liability, the provisions of this Agreement, including, without limitation, the economic terms, would be substantially different.

6. Termination.

6.1 This Agreement will continue to apply until terminated by either you or NVIDIA as described below.

6.2 If you want to terminate this Agreement, you may do so by ceasing to use the SDK.

6.3 NVIDIA may, at any time, terminate this Agreement if: (i) you fail to comply with any term of this Agreement and the non-compliance is not fixed within thirty (30) days following notice from NVIDIA (or immediately if you violate NVIDIA’s intellectual property rights); (ii) you commence or participate in any legal proceeding against NVIDIA with respect to the SDK; or (iii) NVIDIA decides to no longer provide the SDK in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable.

6.4 Upon any termination of this Agreement, you agree to promptly discontinue use of the SDK and destroy all copies in your possession or control. Your prior distributions in accordance with this Agreement are not affected by the termination of this Agreement. Upon written request, you will certify in writing that you have complied with your commitments under this section. Upon any termination of this Agreement all provisions survive except for the license grant provisions.

7. General.

If you wish to assign this Agreement or your rights and obligations, including by merger, consolidation, dissolution or operation of law, contact NVIDIA to ask for permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. NVIDIA may assign, delegate or transfer this Agreement and its rights and obligations, and if to a non-affiliate you will be notified.

You agree to cooperate with NVIDIA and provide reasonably requested information to verify your compliance with this Agreement.

This Agreement will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language.
The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this Agreement. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction.

If any court of competent jurisdiction determines that any provision of this Agreement is illegal, invalid or unenforceable, such provision will be construed as limited to the extent necessary to be consistent with and fully enforceable under the law and the remaining provisions will remain in full force and effect. Unless otherwise specified, remedies are cumulative.

Each party acknowledges and agrees that the other is an independent contractor in the performance of this Agreement.

The SDK has been developed entirely at private expense and is a “commercial item” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this Agreement pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (c)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051.

The SDK is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SDK into any country, or use the SDK in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this Agreement, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SDK.

Any notice delivered by NVIDIA to you under this Agreement will be delivered via mail, email or fax. You agree that any notices that NVIDIA sends you electronically will satisfy any legal communication requirements. Please direct your legal notices or other correspondence to NVIDIA Corporation, 2788 San Tomas Expressway, Santa Clara, California 95051, United States of America, Attention: Legal Department.

This Agreement and any exhibits incorporated into this Agreement constitute the entire agreement of the parties with respect to the subject matter of this Agreement and supersede all prior negotiations or documentation exchanged between the parties relating to this SDK license. Any additional and/or conflicting terms on documents issued by you are null, void, and invalid. Any amendment or waiver under this Agreement shall be in writing and signed by representatives of both parties.

CUDA STRING SUPPLEMENT TO SOFTWARE LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS (September 18, 2018 version)

The terms in this supplement govern your use of the NVIDIA CUDA String SDK under the terms of your license agreement (“Agreement”) as modified by this supplement. Capitalized terms used but not defined below have the meaning assigned to them in the Agreement. This supplement is an exhibit to the Agreement and is incorporated as an integral part of the Agreement.
In the event of conflict between the terms in this supplement and the terms in the Agreement, the terms in this supplement govern.

1. License Scope. The SDK is licensed for you to develop applications only for use in systems with NVIDIA GPUs.

2. Distribution. The following portions of the SDK are distributable under the Agreement: cuString library.

3. Licensing. If the distribution terms in this Agreement are not suitable for your organization, or for any questions regarding this Agreement, please contact NVIDIA at nvidia-compute-license-questions@nvidia.com.
0
rapidsai_public_repos/nvgraph
rapidsai_public_repos/nvgraph/test/Makefile
# Makefile for building NVCompute/CUDA BLAS library

SOLNDIR := ../.

# Get the profile settings
ifdef VULCAN
include $(VULCAN_TOOLKIT_BASE)/build/getprofile.mk
include $(VULCAN_TOOLKIT_BASE)/build/config/$(PROFILE).mk
include $(VULCAN_TOOLKIT_BASE)/build/config/DetectOS.mk
else
include ../../build/getprofile.mk
include ../../build/config/$(PROFILE).mk
include ../../build/config/DetectOS.mk
endif

export I_AM_SLOPPY = 1

AGNOSTIC_PROJECTS += nvgraph_test
AGNOSTIC_PROJECTS += nvgraph_capi_tests
AGNOSTIC_PROJECTS += nvgraph_capi_tests_subgraph
AGNOSTIC_PROJECTS += nvgraph_capi_tests_conversion
AGNOSTIC_PROJECTS += nvgraph_benchmark
AGNOSTIC_PROJECTS += nvgraph_capi_tests_clustering
AGNOSTIC_PROJECTS += nvgraph_capi_tests_contraction
AGNOSTIC_PROJECTS += nvgraph_capi_tests_traversal
AGNOSTIC_PROJECTS += nvgraph_capi_tests_triangles
AGNOSTIC_PROJECTS += nvgraph_2d_partitioning_test
AGNOSTIC_PROJECTS += nvgraph_capi_tests_2d_bfs
AGNOSTIC_PROJECTS += nvgraph_capi_tests_2d_bfs_net

ifdef VULCAN
include $(VULCAN_TOOLKIT_BASE)/build/common.mk
else
include ../../build/common.mk
endif
0
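For readers unfamiliar with this build layout, the sketch below is a minimal, self-contained GNU Make example of the two patterns the test Makefile above relies on: an ifdef VULCAN switch that selects where helper makefiles are included from, and the AGNOSTIC_PROJECTS += accumulation that the included common.mk is expected to iterate over. The show-config target and the TOOLKIT_BUILD variable are illustrative assumptions, not part of the nvgraph build.

# Minimal sketch (assumptions noted above); it mirrors the conditional-include
# pattern of the nvgraph test Makefile without depending on the real helper files.

ifdef VULCAN
  # Inside the Vulcan toolkit tree: helper makefiles live under the toolkit base.
  TOOLKIT_BUILD := $(VULCAN_TOOLKIT_BASE)/build
else
  # Standalone checkout: helper makefiles live two directories up.
  TOOLKIT_BUILD := ../../build
endif

# Each += appends another test project to the list a parent makefile would consume.
AGNOSTIC_PROJECTS += nvgraph_test
AGNOSTIC_PROJECTS += nvgraph_capi_tests

# Hypothetical target that just reports what the conditionals resolved to.
show-config:
	@echo "helper makefiles under: $(TOOLKIT_BUILD)"
	@echo "test projects: $(AGNOSTIC_PROJECTS)"

Running "make show-config" prints the standalone paths, while "make show-config VULCAN=1 VULCAN_TOOLKIT_BASE=/opt/vulcan" exercises the toolkit branch; in the real Makefile the analogous decision selects between including $(VULCAN_TOOLKIT_BASE)/build/common.mk and ../../build/common.mk, which supplies the actual build rules.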