#include "task_group_priority.h" __global__ void task_group_priority_kernel( int* group, int* priority, bool* value, bool* output, int batch_size, int task_num, int group_num) { group += blockIdx.x * task_num; priority += blockIdx.x * task_num; value += blockIdx.x * task_num; output += blockIdx.x * task_num; extern __shared__ int temp[]; for(int i=threadIdx.x; i::max(); } __syncthreads(); for(int i=threadIdx.x; i __global__ void cuda_do_task_group_priority( const torch::PackedTensorAccessor<_Tg,2,torch::RestrictPtrTraits> group, const torch::PackedTensorAccessor<_Tp,2,torch::RestrictPtrTraits> priority, const torch::PackedTensorAccessor value, torch::PackedTensorAccessor result, const _Tg NG) { const int NP = group.size(0); const int NT = group.size(1); const int p = blockIdx.x * blockDim.x + threadIdx.x; if(p < NP) { extern __shared__ char _temp[]; auto temp = reinterpret_cast<_Tp*>(_temp); temp += (threadIdx.x * NG); for(_Tg g=0; g::max(); } for(int t=0; t>>( group, priority, value, output, batch_size, task_num, group_num); GRL_CHECK_CUDA(cudaGetLastError()); };