repo_id | file_path | content | __index_level_0__
---|---|---|---|
rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/lib/include.py | import os
from datetime import datetime
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__).replace('/lib',''))
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
#numerical libs
import math
import numpy as np
import random
import PIL
#import cv2
import matplotlib
print('matplotlib.get_backend : ', matplotlib.get_backend())
# torch libs
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import *
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
from torch.nn.utils.rnn import *
# std libs
import collections
import copy
import numbers
import inspect
import shutil
from timeit import default_timer as timer
import itertools
from collections import OrderedDict
from multiprocessing import Pool
import multiprocessing as mp
#from pprintpp import pprint, pformat
import json
import zipfile
import csv
import pandas as pd
import pickle
import glob
import sys
from distutils.dir_util import copy_tree
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# constant #
PI = np.pi
INF = np.inf
EPS = 1e-12
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/lib | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/lib/net/rate.py | # learning rate scheduler
from mpnn_model.lib.include import *
# http://elgoacademy.org/anatomy-matplotlib-part-1/
def plot_rates(fig, lrs, title=''):
N = len(lrs)
epochs = np.arange(0, N)
#get limits
max_lr = np.max(lrs)
xmin=0
xmax=N
dx=2
ymin=0
ymax=max_lr*1.2
dy=(ymax-ymin)/10
dy=10**math.ceil(math.log10(dy))
ax = fig.add_subplot(111)
#ax = fig.gca()
ax.set_axisbelow(True)
ax.minorticks_on()
ax.set_xticks(np.arange(xmin,xmax+0.0001, dx))
ax.set_yticks(np.arange(ymin,ymax+0.0001, dy))
ax.set_xlim(xmin,xmax+0.0001)
ax.set_ylim(ymin,ymax+0.0001)
ax.grid(b=True, which='minor', color='black', alpha=0.1, linestyle='dashed')
ax.grid(b=True, which='major', color='black', alpha=0.4, linestyle='dashed')
ax.set_xlabel('iter')
ax.set_ylabel('learning rate')
ax.set_title(title)
ax.plot(epochs, lrs)
## simple stepping rates
class StepScheduler():
def __init__(self, pairs):
super(StepScheduler, self).__init__()
N=len(pairs)
rates=[]
steps=[]
for n in range(N):
steps.append(pairs[n][0])
rates.append(pairs[n][1])
self.rates = rates
self.steps = steps
def __call__(self, epoch):
N = len(self.steps)
lr = -1
for n in range(N):
if epoch >= self.steps[n]:
lr = self.rates[n]
return lr
def __str__(self):
string = 'Step Learning Rates\n' \
+ 'rates=' + str(['%7.4f' % i for i in self.rates]) + '\n' \
+ 'steps=' + str(['%7.0f' % i for i in self.steps]) + ''
return string
## https://github.com/pytorch/tutorials/blob/master/beginner_source/transfer_learning_tutorial.py
class DecayScheduler():
def __init__(self, base_lr, decay, step):
super(DecayScheduler, self).__init__()
self.step = step
self.decay = decay
self.base_lr = base_lr
def get_rate(self, epoch):
lr = self.base_lr * (self.decay**(epoch // self.step))
return lr
def __str__(self):
string = '(Exp) Decay Learning Rates\n' \
+ 'base_lr=%0.3f, decay=%0.3f, step=%0.3f'%(self.base_lr, self.decay, self.step)
return string
# 'Cyclical Learning Rates for Training Neural Networks'- Leslie N. Smith, arxiv 2017
# https://arxiv.org/abs/1506.01186
# https://github.com/bckenstler/CLR
class CyclicScheduler1():
def __init__(self, min_lr=0.001, max_lr=0.01, period=10 ):
super(CyclicScheduler1, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
def __call__(self, time):
#sawtooth
#r = (1-(time%self.period)/self.period)
#cosine
time= time%self.period
r = (np.cos(time/self.period *PI)+1)/2
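# r anneals from 1 down to 0 across one period, so lr sweeps from max_lr to min_lr.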
lr = self.min_lr + r*(self.max_lr-self.min_lr)
return lr
def __str__(self):
string = 'CyclicScheduler\n' \
+ 'min_lr=%0.3f, max_lr=%0.3f, period=%8.1f'%(self.min_lr, self.max_lr, self.period)
return string
class CyclicScheduler2():
def __init__(self, min_lr=0.001, max_lr=0.01, period=10, max_decay=0.99, warm_start=0 ):
super(CyclicScheduler2, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
self.max_decay = max_decay
self.warm_start = warm_start
self.cycle = -1
def __call__(self, time):
if time<self.warm_start: return self.max_lr
#cosine
self.cycle = (time-self.warm_start)//self.period
time = (time-self.warm_start)%self.period
period = self.period
min_lr = self.min_lr
max_lr = self.max_lr *(self.max_decay**self.cycle)
r = (np.cos(time/period *PI)+1)/2
lr = min_lr + r*(max_lr-min_lr)
return lr
def __str__(self):
string = 'CyclicScheduler\n' \
+ 'min_lr=%0.4f, max_lr=%0.4f, period=%8.1f'%(self.min_lr, self.max_lr, self.period)
return string
#tanh curve
class CyclicScheduler3():
def __init__(self, min_lr=0.001, max_lr=0.01, period=10, max_decay=0.99, warm_start=0 ):
super(CyclicScheduler3, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
self.max_decay = max_decay
self.warm_start = warm_start
self.cycle = -1
def __call__(self, time):
if time<self.warm_start: return self.max_lr
#tanh
self.cycle = (time-self.warm_start)//self.period
time = (time-self.warm_start)%self.period
period = self.period
min_lr = self.min_lr
max_lr = self.max_lr *(self.max_decay**self.cycle)
r = (np.tanh(-time/period *16 +8)+1)*0.5
lr = min_lr + r*(max_lr-min_lr)
return lr
def __str__(self):
string = 'CyclicScheduler\n' \
+ 'min_lr=%0.3f, max_lr=%0.3f, period=%8.1f'%(self.min_lr, self.max_lr, self.period)
return string
#
# class CyclicScheduler():
#
# def __init__(self, pairs, period=10, max_decay=1, warm_start=0 ):
# super(CyclicScheduler, self).__init__()
#
# self.lrs=[]
# self.steps=[]
# for p in pairs:
# self.steps.append(p[0])
# self.lrs.append(p[1])
#
#
# self.period = period
# self.warm_start = warm_start
# self.max_decay = max_decay
# self.cycle = -1
#
# def __call__(self, time):
# if time<self.warm_start: return self.lrs[0]
#
# self.cycle = (time-self.warm_start)//self.period
# time = (time-self.warm_start)%self.period
#
# rates = self.lrs.copy()
# steps = self.steps
# rates[0] = rates[0] *(self.max_decay**self.cycle)
# lr = -1
# for rate,step in zip(rates,steps):
# if time >= step:
# lr = rate
#
# return lr
#
#
#
# def __str__(self):
# string = 'CyclicScheduler\n' \
# + 'lrs =' + str(['%7.4f' % i for i in self.lrs]) + '\n' \
# + 'steps=' + str(['%7.0f' % i for i in self.steps]) + '\n' \
# + 'period=%8.1f'%(self.period)
# return string
class NullScheduler():
def __init__(self, lr=0.01 ):
super(NullScheduler, self).__init__()
self.lr = lr
self.cycle = 0
def __call__(self, time):
return self.lr
def __str__(self):
string = 'NullScheduler\n' \
+ 'lr=%0.5f '%(self.lr)
return string
# net ------------------------------------
# https://github.com/pytorch/examples/blob/master/imagenet/main.py ###############
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_learning_rate(optimizer):
lr=[]
for param_group in optimizer.param_groups:
lr +=[ param_group['lr'] ]
assert(len(lr)==1) #we support only one param_group
lr = lr[0]
return lr
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
num_iters=125
scheduler = StepScheduler([ (0,0.1), (10,0.01), (25,0.005), (35,0.001), (40,0.0001), (43,-1)])
#scheduler = DecayScheduler(base_lr=0.1, decay=0.32, step=10)
#scheduler = CyclicScheduler(min_lr=0.0001, max_lr=0.01, period=30., warm_start=5) ##exp_range ##triangular2
#scheduler = CyclicScheduler([ (0,0.1), (25,0.01), (45,0.005)], period=50., warm_start=5) ##exp_range ##triangular2
lrs = np.zeros((num_iters),np.float32)
for iter in range(num_iters):
lr = scheduler(iter)
lrs[iter] = lr
if lr<0:
num_iters = iter
break
#print ('iter=%02d, lr=%f %d'%(iter,lr, scheduler.cycle))
#plot
fig = plt.figure()
plot_rates(fig, lrs, title=str(scheduler))
plt.show()
# https://github.com/Jiaming-Liu/pytorch-lr-scheduler/blob/master/lr_scheduler.py
# PVANET plateau lr policy
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/lib | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/lib/utility/draw.py | import os
#qt bug ???
os.environ['QT_XKB_CONFIG_ROOT']='/usr/share/X11/xkb/'
from mpnn_model.lib.include import *
import matplotlib.cm
import cv2  # needed here: the cv2 import in lib/include.py is commented out
# draw -----------------------------------
def image_show(name, image, resize=1):
H,W = image.shape[0:2]
cv2.namedWindow(name, cv2.WINDOW_GUI_NORMAL) #WINDOW_NORMAL
#cv2.namedWindow(name, cv2.WINDOW_GUI_EXPANDED) #WINDOW_GUI_EXPANDED
cv2.imshow(name, image.astype(np.uint8))
cv2.resizeWindow(name, round(resize*W), round(resize*H))
def image_show_norm(name, image, max=None, min=None, resize=1):
if max is None: max=image.max()
if min is None: min=image.min()
H,W = image.shape[0:2]
cv2.namedWindow(name, cv2.WINDOW_GUI_NORMAL) #WINDOW_NORMAL
cv2.imshow(name, ((image-min)/(max-min)*255).astype(np.uint8))
cv2.resizeWindow(name, round(resize*W), round(resize*H))
def draw_shadow_text(img, text, pt, fontScale, color, thickness, color1=None, thickness1=None):
if color1 is None: color1=(0,0,0)
if thickness1 is None: thickness1 = thickness+2
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, pt, font, fontScale, color1, thickness1, cv2.LINE_AA)
cv2.putText(img, text, pt, font, fontScale, color, thickness, cv2.LINE_AA)
def to_color_image(image, max=None):
if max is None: max=image.max()
image = (image/max*255).astype(np.uint8)
image = cv2.cvtColor(image,cv2.COLOR_GRAY2BGR)
return image
##http://stackoverflow.com/questions/26690932/opencv-rectangle-with-dotted-or-dashed-lines
def draw_dotted_line(image, pt1, pt2, color, thickness=1, gap=20):
dist =((pt1[0]-pt2[0])**2+(pt1[1]-pt2[1])**2)**.5
pts= []
for i in np.arange(0,dist,gap):
r=i/dist
x=int((pt1[0]*(1-r)+pt2[0]*r)+.5)
y=int((pt1[1]*(1-r)+pt2[1]*r)+.5)
p = (x,y)
pts.append(p)
if gap==1:
for p in pts:
cv2.circle(image,p,thickness,color,-1,cv2.LINE_AA)
else:
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
for p, q in pairwise(pts):
cv2.line(image,p, q, color,thickness,cv2.LINE_AA)
def draw_dotted_poly(image, pts, color, thickness=1, gap=20):
s=pts[0]
e=pts[0]
pts.append(pts.pop(0))
for p in pts:
s=e
e=p
draw_dotted_line(image,s,e,color,thickness,gap)
def draw_dotted_rect(image, pt1, pt2, color, thickness=1, gap=3):
pts = [pt1,(pt2[0],pt1[1]),pt2,(pt1[0],pt2[1])]
draw_dotted_poly(image, pts, color, thickness, gap)
def draw_screen_rect(image, pt1, pt2, color, alpha=0.5):
x1, y1 = pt1
x2, y2 = pt2
image[y1:y2,x1:x2,:] = (1-alpha)*image[y1:y2,x1:x2,:] + (alpha)*np.array(color, np.uint8)
# def draw_mask(image, mask, color=(255,255,255), α=1, β=0.25, λ=0., threshold=32 ):
# # image * α + mask * β + λ
#
# if threshold is None:
# mask = mask/255
# else:
# mask = clean_mask(mask,threshold,1)
#
# mask = np.dstack((color[0]*mask,color[1]*mask,color[2]*mask)).astype(np.uint8)
# image[...] = cv2.addWeighted(image, α, mask, β, λ)
#
# def draw_contour(image, mask, color=(0,255,0), thickness=1, threshold=127):
# ret, thresh = cv2.threshold(mask,threshold,255,0)
# ret = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# hierarchy = ret[0]
# contours = ret[1]
# #image[...]=image
# cv2.drawContours(image, contours, -1, color, thickness, cv2.LINE_AA)
# ## drawContours(image, contours, contourIdx, color, thickness=None, lineType=None, hierarchy=None, maxLevel=None, offset=None): # real signature unknown; restored from __doc__
#
#
def to_color(s, color=None):
if type(color) in [str] or color is None:
#https://matplotlib.org/xkcd/examples/color/colormaps_reference.html
if color is None: color='cool'
color = matplotlib.cm.get_cmap(color)(s)
b = int(255*color[2])
g = int(255*color[1])
r = int(255*color[0])
elif type(color) in [list,tuple]:
b = int(s*color[0])
g = int(s*color[1])
r = int(s*color[2])
return b,g,r
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
image = np.zeros((50,50,3), np.uint8)
cv2.rectangle(image, (0,0),(49,49), (0,0,255),1) #inclusive
image[8,8]=[255,255,255]
image_show('image',image,10)
cv2.waitKey(0)
print('\nsuccess!') | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/lib | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/lib/utility/file.py | from mpnn_model.lib.include import *
import builtins
import re
class Struct(object):
def __init__(self, is_copy=False, **kwargs):
self.add(is_copy, **kwargs)
def add(self, is_copy=False, **kwargs):
#self.__dict__.update(kwargs)
if is_copy == False:
for key, value in kwargs.items():
setattr(self, key, value)
else:
for key, value in kwargs.items():
try:
setattr(self, key, copy.deepcopy(value))
#setattr(self, key, value.copy())
except Exception:
setattr(self, key, value)
def __str__(self):
return str(self.__dict__.keys())
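# Usage sketch (illustrative): cfg = Struct(lr=0.1, epochs=10); cfg.add(seed=0); cfg.lr  # -> 0.1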
# log ------------------------------------
def remove_comments(lines, token='#'):
""" Generator. Strips comments and whitespace from input lines.
"""
l = []
for line in lines:
s = line.split(token, 1)[0].strip()
if s != '':
l.append(s)
return l
def open(file, mode=None, encoding=None):
if mode == None: mode = 'r'
if '/' in file:
if 'w' in mode or 'a' in mode:
dir = os.path.dirname(file)
if not os.path.isdir(dir): os.makedirs(dir)
f = builtins.open(file, mode=mode, encoding=encoding)
return f
def remove(file):
if os.path.exists(file): os.remove(file)
def empty(dir):
if os.path.isdir(dir):
shutil.rmtree(dir, ignore_errors=True)
else:
os.makedirs(dir)
# http://stackoverflow.com/questions/34950201/pycharm-print-end-r-statement-not-working
class Logger(object):
def __init__(self):
self.terminal = sys.stdout #stdout
self.file = None
def open(self, file, mode=None):
if mode is None: mode ='w'
self.file = open(file, mode)
def write(self, message, is_terminal=1, is_file=1 ):
if '\r' in message: is_file=0
if is_terminal == 1:
self.terminal.write(message)
self.terminal.flush()
#time.sleep(1)
if is_file == 1:
self.file.write(message)
self.file.flush()
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing.
# you might want to specify some extra behavior here.
pass
# io ------------------------------------
def write_list_to_file(list_file, strings):
with open(list_file, 'w') as f:
for s in strings:
f.write('%s\n'%str(s))
pass
def read_list_from_file(list_file, comment='#'):
with open(list_file) as f:
lines = f.readlines()
strings=[]
for line in lines:
if comment is not None:
s = line.split(comment, 1)[0].strip()
else:
s = line.strip()
if s != '':
strings.append(s)
return strings
def read_pickle_from_file(pickle_file):
with open(pickle_file,'rb') as f:
x = pickle.load(f)
return x
def write_pickle_to_file(pickle_file, x):
with open(pickle_file, 'wb') as f:
pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)
# backup ------------------------------------
#https://stackoverflow.com/questions/1855095/how-to-create-a-zip-archive-of-a-directory
def backup_project_as_zip(project_dir, zip_file):
assert(os.path.isdir(project_dir))
assert(os.path.isdir(os.path.dirname(zip_file)))
shutil.make_archive(zip_file.replace('.zip',''), 'zip', project_dir)
pass
# etc ------------------------------------
def time_to_str(t, mode='min'):
if mode=='min':
t = int(t)//60
hr = t//60
min = t%60
return '%2d hr %02d min'%(hr,min)
elif mode=='sec':
t = int(t)
min = t//60
sec = t%60
return '%2d min %02d sec'%(min,sec)
else:
raise NotImplementedError
def np_float32_to_uint8(x, scale=255):
return (x*scale).astype(np.uint8)
def np_uint8_to_float32(x, scale=255):
return (x/scale).astype(np.float32)
def int_tuple(x):
return tuple( [int(round(xx)) for xx in x] )
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_PREDICT_TYPE_LMAE_WO_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: False
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: True
y_range: [-36.2186, 204.8800]
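# y_range here spans the raw scalar-coupling targets; the gaussrank variants of
# this config use the rank-transformed range [-2.3267..., 2.3267...] instead.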
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: lmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: mpnn_gauss_rank_MLMAE_2CE_RNN_V3_type_seq_v3_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_EMBED_TYPE_LMAE_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: True
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 800
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 1
predict_type: False
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 90
max_lr: 0.005
loss_name: lmae_embed_type
callback_metric: LMAE
pretrain: False
predict_type: False
model_name: MPNN_RNN_EMBED_TYPE_GAUSSRANK_LMAE
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_PREDICT_TYPE_MLMAE_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: False
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: mlmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_GAUSSRANK_PREDICT_TYPE_MLMAE_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_PREDICT_TYPE_LMAE_WO_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: False
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: False
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: lmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_WO_GAUSSRANK_PREDICT_TYPE_LMAE_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_EMBED_TYPE_LMAE_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: False
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 512
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 1
predict_type: False
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 90
max_lr: 0.005
loss_name: lmae_embed_type
callback_metric: LMAE
pretrain: False
predict_type: False
model_name: MPNN_EMBED_TYPE_GAUSSRANK_LMAE
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_PREDICT_TYPE_MLMAE_GAUSSRANK_BOOTSTRAP.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: False
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: mlmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_GAUSSRANK_PREDICT_TYPE_MLMAE_BOOTSTRAP_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_PREDICT_TYPE_MLMAE_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: True
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: mlmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_RNN_PREDICT_TYPE_MLMAE_GAUSSRANK
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_EMBED_TYPE_LMAE_WO_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: False
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: False
y_range: [-36.2186, 204.8800]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 512
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 1
predict_type: False
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 90
max_lr: 0.005
loss_name: lmae_embed_type
callback_metric: LMAE
pretrain: False
predict_type: False
model_name: MPNN_RNN_EMBED_TYPE_WO_GAUSSRANK_LMAE
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_PREDICT_TYPE_LMAE_GAUSSRANK_BOOTSTRAP.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: True
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: lmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_RNN_GAUSSRANK_PREDICT_TYPE_LMAE_BOOTSTRAP_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/.nfs0000000001371b7a000004c5 | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output
script_path: /rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/dataset.py
graph_path: /rapids/notebooks/srabhi/champs-2019/input/structure/graph4
normalize: False
model:
script_path: /rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/model.py
num_target : 8
mpnn:
T_steps: 6
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 6
num_layer: 1
in_channel: 128
batch_size: 64
y_range: [-2.326753765513524, 2.3267537655135464]
regression:
num_output: 1
node_dim: 128
shared_layers: [1024, 512]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
Classif:
y_range: [-2.326753765513524, 2.3267537655135464]
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 180
max_lr: 0.005
loss_name: lmae
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: mpnn_gauss_rank_predict_type_180epochs_
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_PREDICT_TYPE_MLMAE_GAUSSRANK_BOOTSTRAP.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: True
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: mlmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_RNN_GAUSSRANK_PREDICT_TYPE_MLMAE_BOOTSTRAP_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_MAE_GAUSSRANK_SINGLE_TYPE.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 1
RNN: True
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [128, 64]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
num_target: 1
predict_type: False
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.001
loss_name: lmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: False
model_name: MPNN_RNN_SINGLE_TYPE_GAUSSRANK_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_MAE_WO_GAUSSRANK_SINGLE_TYPE.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: False
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 1
RNN: True
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [128, 64]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
num_target: 1
predict_type: False
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.001
loss_name: lmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: False
model_name: MPNN_RNN_SINGLE_TYPE_WO_GAUSSRANK_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_EMBED_TYPE_LMAE_WO_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: False
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: True
y_range: [-36.2186, 204.8800]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 800
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 1
predict_type: False
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 90
max_lr: 0.005
loss_name: lmae_embed_type
callback_metric: LMAE
pretrain: False
predict_type: False
model_name: MPNN_RNN_EMBED_TYPE_WO_GAUSSRANK_LMAE
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_PREDICT_TYPE_LMAE_GAUSSRANK_BOOTSTRAP.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: False
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: lmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_GAUSSRANK_PREDICT_TYPE_LMAE_BOOTSTRAP_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_RNN_PREDICT_TYPE_LMAE_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/final_solution/mpnn_model/model.py
num_type: 8
RNN: True
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: lmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_RNN_PREDICT_TYPE_LMAE_GAUSSRANK
device: cuda
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/experiments/MPNN_PREDICT_TYPE_LMAE_GAUSSRANK.yaml | dataset:
input_path: /rapids/notebooks/srabhi/champs-2019/input
output_path: /rapids/notebooks/srabhi/champs-2019/output/
script_path: /rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/dataset.py
normalize: False
gaussrank: True
model:
script_path: /rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/model.py
num_type: 8
RNN: False
y_range: [-2.326753765513524, 2.3267537655135464]
mpnn:
T_steps: 4
node_encoder:
encoding: label
emb_sz: [[6,3], [3,3], [3, 3], [3,3], [5,3], [8, 4]]
n_cont: 1
node_dim: 7
layers: [128, 128]
activation: relu
dropout: 0.
edge_encoder:
encoding: label
emb_sz: [[5,3]]
n_cont: 2
node_dim: 128
edge_dim: 3
layers: [256, 128]
activation: relu
dropout: 0.
Set2Set:
processing_step: 4
num_layer: 1
in_channel: 128
batch_size: 64
regression:
num_output: 1
input_dim: 768
shared_layers: [1024, 512, 128]
activation: relu
dropout: 0.
branch_layers: [512, 128]
num_target: 8
predict_type: True
node_seq:
node_dim: 128
hidden_size: 256
num_layers: 1
dropout: 0.05
batch_first: True
bidirectional: True
rnn_model: 'LSTM'
attention: True
train:
train_shape: 4658147
test_shape: 2505542
batch_size: 64
epochs: 1
max_lr: 0.005
loss_name: lmaeo2ceha
callback_metric: LMAE
pretrain: False
predict_type: True
model_name: MPNN_GAUSSRANK_PREDICT_TYPE_LMAE_
device: cuda
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020/README.md | ## GPU Accelerated Feature Engineering and Training for Recommender Systems (source)
This content was moved to a new [competition repository](https://github.com/NVIDIA-Merlin/competitions).
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/pytorch/README.md | [PyTorch](https://pytorch.org/) is an open source machine learning framework designed to accelerate the path from research prototyping to production deployment. RAPIDS is an active contributor to PyTorch, developing preprocessing functionality and dataloading on the GPU, along with improvements to kernels and optimizers critical to deep learning on tabular data.
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/optimizers/adamw.py | import math
import os
from distutils.util import strtobool
import torch
from torch.optim.optimizer import Optimizer
from torch.hub import _check_module_exists
NUMBA_CUDA_EXIST = False
NUMBA_CUDA_THREAD_PER_BLOCK = 512
if not strtobool(os.environ.get('NO_NUMBA', 'n')) and _check_module_exists("numba.cuda"):
import numba.cuda
NUMBA_CUDA_EXIST = numba.cuda.is_available()
@numba.cuda.jit()
def numba_cuda_kernel(param, grad, exp_avg, exp_avg_sq, beta1,
beta2, step_size, bias_correction2, eps,
weight_decay):
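# Each CUDA thread updates one parameter element, fusing the full AdamW step
# (moment updates, bias-corrected step size, decoupled weight decay) into a
# single elementwise kernel.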
i = numba.cuda.grid(1)
if i >= param.size:
return
exp_avg[i] = exp_avg[i] * beta1 + (1 - beta1) * grad[i]
exp_avg_sq[i] = exp_avg_sq[i] * beta2 + (1 - beta2) * grad[i] * grad[i]
denom = math.sqrt(exp_avg_sq[i]) / bias_correction2 + eps
param[i] *= weight_decay
param[i] = param[i] + (-step_size) * (exp_avg[i] / denom)
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
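Example (illustrative sketch; ``model``, ``loss_fn``, ``inputs`` and
``targets`` are assumed to be defined elsewhere)::
>>> optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
>>> optimizer.zero_grad()
>>> loss_fn(model(inputs), targets).backward()
>>> optimizer.step()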
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
# In order to reduce Numba overhead, we save the device arrays
# between calls to `step()` in `_nbstate`.
self._nbstate = getattr(self, '_nbstate', {})
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for param in group['params']:
if param.grad is None:
continue
# Perform optimization step
grad = param.grad.data
p = param.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, '
'please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[param]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p)
elif NUMBA_CUDA_EXIST and numba.cuda.is_cuda_array(p.data):
self._nbstate[param] = {
'param': numba.cuda.as_cuda_array(p.data.flatten()),
'grad': numba.cuda.as_cuda_array(grad.flatten()),
'exp_avg': numba.cuda.as_cuda_array(state['exp_avg'].data.flatten()),
'exp_avg_sq': numba.cuda.as_cuda_array(state['exp_avg_sq']
.data.flatten()),
'blockspergrid': math.ceil(p.data.numel() / NUMBA_CUDA_THREAD_PER_BLOCK)
}
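# Decoupled weight decay (AdamW): precompute the multiplicative shrink factor
# (1 - lr * weight_decay) that is applied directly to the parameters each step.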
weight_decay = 1 - group['lr'] * group['weight_decay']
eps = group['eps']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = math.sqrt(1 - beta2 ** state['step'])
step_size = group['lr'] / bias_correction1
if param in self._nbstate:
s = self._nbstate[param]
numba_cuda_kernel[s['blockspergrid'],
NUMBA_CUDA_THREAD_PER_BLOCK](s['param'],
s['grad'],
s['exp_avg'],
s['exp_avg_sq'],
beta1, beta2,
step_size,
bias_correction2,
eps, weight_decay)
else:
exp_avg = state['exp_avg'].data
exp_avg_sq = state['exp_avg_sq'].data
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / bias_correction2).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / bias_correction2).add_(eps)
# Perform stepweight decay
p.data.mul_(weight_decay)
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/optimizers/README.md | # pytorch-optimizers
Numba accelerated PyTorch Optimizers
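A minimal usage sketch (illustrative only; it assumes this directory is importable as `optimizers` and that a CUDA device is available):

```python
import torch
from optimizers.adam import Adam  # Numba-accelerated drop-in for torch.optim.Adam

model = torch.nn.Linear(10, 1).cuda()
optimizer = Adam(model.parameters(), lr=1e-3)

inputs = torch.randn(32, 10, device='cuda')
targets = torch.randn(32, 1, device='cuda')

optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(inputs), targets)
loss.backward()
optimizer.step()  # dispatches to the fused Numba CUDA kernel when available
```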
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/optimizers/adam.py | import math
import os
from distutils.util import strtobool
import torch
from torch.optim.optimizer import Optimizer
from torch.hub import _check_module_exists
NUMBA_CUDA_EXIST = False
NUMBA_CUDA_THREAD_PER_BLOCK = 512
if not strtobool(os.environ.get('NO_NUMBA', 'n')) and _check_module_exists("numba.cuda"):
import numba.cuda
NUMBA_CUDA_EXIST = numba.cuda.is_available()
@numba.cuda.jit()
def numba_cuda_kernel(param, grad, exp_avg, exp_avg_sq, beta1,
beta2, step_size, bias_correction2, eps,
weight_decay):
i = numba.cuda.grid(1)
if i >= param.size:
return
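# Classic (coupled) L2 regularization: the decay term is folded into the
# gradient before the moment updates, unlike the decoupled AdamW variant.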
if weight_decay != 0:
grad[i] += weight_decay * param[i]
exp_avg[i] = exp_avg[i] * beta1 + (1 - beta1) * grad[i]
exp_avg_sq[i] = exp_avg_sq[i] * beta2 + (1 - beta2) * grad[i] * grad[i]
denom = math.sqrt(exp_avg_sq[i]) / bias_correction2 + eps
param[i] = param[i] + (-step_size) * (exp_avg[i] / denom)
class Adam(Optimizer):
r"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
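Example (illustrative sketch; ``model`` and a computed ``loss`` are assumed
to exist)::
>>> optimizer = Adam(model.parameters(), lr=1e-3)
>>> optimizer.zero_grad()
>>> loss.backward()
>>> optimizer.step()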
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
# In order to reduce Numba overhead, we save the device arrays
# between calls to `step()` in `_nbstate`.
self._nbstate = getattr(self, '_nbstate', {})
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for param in group['params']:
if param.grad is None:
continue
# Perform optimization step
grad = param.grad.data
p = param.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, '
'please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[param]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p)
elif NUMBA_CUDA_EXIST and numba.cuda.is_cuda_array(p.data):
self._nbstate[param] = {
'param': numba.cuda.as_cuda_array(p.data.flatten()),
'grad': numba.cuda.as_cuda_array(grad.flatten()),
'exp_avg': numba.cuda.as_cuda_array(state['exp_avg'].data.flatten()),
'exp_avg_sq': numba.cuda.as_cuda_array(state['exp_avg_sq'].
data.flatten()),
'blockspergrid': math.ceil(p.data.numel() / NUMBA_CUDA_THREAD_PER_BLOCK)
}
weight_decay = group['weight_decay']
eps = group['eps']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = math.sqrt(1 - beta2 ** state['step'])
step_size = group['lr'] / bias_correction1
if param in self._nbstate:
s = self._nbstate[param]
numba_cuda_kernel[s['blockspergrid'],
NUMBA_CUDA_THREAD_PER_BLOCK](s['param'],
s['grad'],
s['exp_avg'],
s['exp_avg_sq'],
beta1, beta2,
step_size,
bias_correction2,
eps, weight_decay)
else:
if weight_decay != 0:
grad.add_(weight_decay, p.data)
exp_avg = state['exp_avg'].data
exp_avg_sq = state['exp_avg_sq'].data
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / bias_correction2).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / bias_correction2).add_(eps)
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/optimizers/radam.py | import math
import os
import torch
from torch.optim.optimizer import Optimizer, required
from distutils.util import strtobool
from torch.hub import _check_module_exists
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
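# N_sma approximates the length of the simple moving average; the variance
# rectification below is only applied once it exceeds 4, per the RAdam paper
# (https://arxiv.org/abs/1908.03265).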
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
return loss
class PlainRAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(PlainRAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(PlainRAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
class AdamW(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, warmup = warmup)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['warmup'] > state['step']:
scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']
else:
scheduled_lr = group['lr']
                # use the warmup-scheduled learning rate so warmup actually scales the update
                step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
p.data.copy_(p_data_fp32)
return loss
NUMBA_CUDA_EXIST = False
NUMBA_CUDA_THREAD_PER_BLOCK = 512
if not strtobool(os.environ.get('NO_NUMBA', 'n')) and _check_module_exists("numba.cuda"):
import numba.cuda
NUMBA_CUDA_EXIST = numba.cuda.is_available()
@numba.cuda.jit()
def numba_cuda_kernel(param, grad, exp_avg, exp_avg_sq, beta1,
beta2, step_size, eps,
weight_decay, N_sma):
i = numba.cuda.grid(1)
if i >= param.size:
return
exp_avg[i] = exp_avg[i] * beta1 + (1 - beta1) * grad[i]
exp_avg_sq[i] = exp_avg_sq[i] * beta2 + (1 - beta2) * grad[i] * grad[i]
if weight_decay != 0:
grad[i] += weight_decay * param[i]
if N_sma >= 5:
denom = math.sqrt(exp_avg_sq[i]) + eps
param[i] = param[i] + (-step_size) * (exp_avg[i] / denom)
else:
param[i] = param[i] + (-step_size) * exp_avg[i]
class FusedRAdam(Optimizer):
r"""Implements RAdam algorithm.
    It has been proposed in `On the Variance of the Adaptive Learning Rate and Beyond`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
    .. _On the Variance of the Adaptive Learning Rate and Beyond:
        https://arxiv.org/abs/1908.03265
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(FusedRAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(FusedRAdam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
# In order to reduce Numba overhead, we save the device arrays
# between calls to `step()` in `_nbstate`.
self._nbstate = getattr(self, '_nbstate', {})
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for param in group['params']:
if param.grad is None:
continue
# Perform optimization step
grad = param.grad.data
p = param.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients,'
'please consider SparseAdam instead')
state = self.state[param]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if NUMBA_CUDA_EXIST and numba.cuda.is_cuda_array(p.data):
self._nbstate[param] = {
'param': numba.cuda.as_cuda_array(p.data.flatten()),
'grad': numba.cuda.as_cuda_array(grad.flatten()),
'exp_avg': numba.cuda.as_cuda_array(state['exp_avg'].data.flatten()),
'exp_avg_sq': numba.cuda.as_cuda_array(state['exp_avg_sq'].
data.flatten()),
'blockspergrid': math.ceil(p.data.numel() / NUMBA_CUDA_THREAD_PER_BLOCK)
}
weight_decay = group['weight_decay']
eps = group['eps']
beta1, beta2 = group['betas']
exp_avg = state['exp_avg'].data
exp_avg_sq = state['exp_avg_sq'].data
state['step'] += 1
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
if param in self._nbstate:
s = self._nbstate[param]
numba_cuda_kernel[s['blockspergrid'],
NUMBA_CUDA_THREAD_PER_BLOCK](s['param'],
s['grad'],
s['exp_avg'],
s['exp_avg_sq'],
beta1, beta2,
step_size,
eps,
weight_decay,
N_sma)
else:
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if weight_decay != 0:
grad.add_(-weight_decay * group['lr'], p.data)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p.addcdiv_(-step_size, exp_avg, denom)
else:
p.add_(-step_size, exp_avg)
p.data.copy_(p.data)
return loss
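# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the model, data loader, and
# hyper-parameters below are hypothetical, not part of this module):
#
#     import torch.nn.functional as F
#     optimizer = FusedRAdam(model.parameters(), lr=1e-3, weight_decay=1e-4)
#     for data, target in loader:
#         optimizer.zero_grad()
#         loss = F.mse_loss(model(data), target)
#         loss.backward()
#         optimizer.step()
#
# RAdam, PlainRAdam, and AdamW above accept the same parameters-first call.
# ---------------------------------------------------------------------------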
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/optimizers/__init__.py | from .adam import Adam
from .adamw import AdamW
from .radam import RAdam, PlainRAdam, FusedRAdam
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/optimizers/setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='pytorch_optimizers',
version='0.0.1',
description='Numba accelerated PyTorch Optimizers',
# The project's main homepage.
url='https://github.com/madsbk/pytorch-optimizers',
# Author details
author='Mads R. B. Kristensen',
author_email='madsbk@gmail.com',
# Choose your license
license='Apache 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache License 2.0',
'Programming Language :: Python :: 3',
],
# What does your project relate to?
keywords='PyTorch',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'torch',
],
)
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/optimizers/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/batch_dataloader/batch_dataset.py | import torch
class BatchDataset(object):
"""An abstract class representing a Batch Dataset.
All other datasets should subclass this. All subclasses should override
    ``__len__``, which provides the number of batches in the dataset, ``__getitem__``,
    supporting integer indexing of batches in range from 0 to len(self) exclusive,
    and ``shuffle``, which randomly shuffles the data and is generally called once per epoch.
Batch datasets are meant to be iterated over in order rather than randomly accessed
so the randomization has to happen first.
"""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
    def __add__(self, other):
        raise NotImplementedError
def shuffle(self):
raise NotImplementedError
class TensorBatchDataset(BatchDataset):
"""Batch Dataset wrapping Tensors.
Arguments:
*tensors (Tensor): tensors that have the same size of the first dimension.
batch_size: The size of the batch to return
        pin_memory (bool, optional): If ``True``, the dataset tensors will be stored in pinned memory for faster copies to the GPU.
I saw no performance improvement to doing so but results may vary.
"""
def __init__(self, tensors, batch_size=1, pin_memory=False):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
self.tensors = tensors
self.batch_size=batch_size
self.num_samples = tensors[0].size(0)
        if pin_memory:
            # Tensor.pin_memory() returns a pinned copy rather than pinning in place,
            # so keep the returned tensors instead of discarding them
            self.tensors = [tensor.pin_memory() for tensor in self.tensors]
def __len__(self):
if self.num_samples%self.batch_size == 0:
return self.num_samples // self.batch_size
else:
return self.num_samples // self.batch_size + 1
def __getitem__(self, item):
idx = item*self.batch_size
#Need to handle odd sized batches if data isn't divisible by batchsize
if idx < self.num_samples and (idx + self.batch_size < self.num_samples or self.num_samples%self.batch_size == 0):
return [tensor[idx:idx+self.batch_size] for tensor in self.tensors]
        elif idx < self.num_samples and idx + self.batch_size > self.num_samples:
return [tensor[idx:] for tensor in self.tensors]
else:
raise IndexError
def __add__(self, tensors):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
assert len(self.tensors) == len(tensors)
assert all(self_tensor[0].shape == tensor[0].shape for self_tensor, tensor in zip(self.tensors, tensors))
num_add_samples = tensors[0].size(0)
self.num_samples = self.num_samples + num_add_samples
self.tensors = [torch.cat((self_tensor, tensor)) for self_tensor, tensor in zip(self.tensors, tensors)]
def shuffle(self):
idx = torch.randperm(self.num_samples, dtype=torch.int64)
self.tensors = [tensor[idx] for tensor in self.tensors]
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/batch_dataloader/README.md | ### Pytorch Batch Dataloader
## 🚀 Feature
A dataloader and dataset that operate at the batch level, rather than the item level, pulling batches from contiguous blocks of memory and avoiding random access patterns in the dataloader.
## Motivation
Loading data item by item and collating into a batch is very inefficient, particularly in the case of tabular or text data where the items are small. This is compounded further when you want to use large batch sizes. By pre-shuffling the data each epoch (when required) we can grab each batch as a single read from contiguous memory. This is much faster and scales better with batch size, removing the necessity of multiprocessing, which adds complexity in the form of bus errors when not enough shared memory is available (https://github.com/pytorch/pytorch/issues/5040), CUDA init issues when forking (https://github.com/pytorch/pytorch/issues/4377), etc. This forking issue was one of my original motivations, as it solves the problem of using the dataloader in conjunction with RAPIDS or any other code that calls CUDA before the dataloader workers are forked. It should also solve the dataloader speed issue on Windows, at least for tabular and text data (https://github.com/pytorch/pytorch/issues/12831), as spawning is not necessary.
Using the proposed method results in better GPU utilization and better throughput in the training tests on tabular data that I've run. With no multiprocessing I've measured a 5-15% improvement* in throughput over an 8-worker vanilla dataloader (more workers were tried, but throughput peaked at 8). I've also been able to increase batch sizes for tabular data into the 800K+ range with no loss of accuracy, and get a 2x performance improvement over the best multiprocessing dataloader I could run without hitting the bus errors that cropped up with large batch sizes.
*depends on tensor and batch size
## Pitch
I've created source for a batch dataloader and batch dataset modelled after their vanilla counterparts and would love to see it integrated into the PyTorch repo. Usage is similar, and I've tried to stick to the PyTorch variable naming and formatting.
Code can be found here: https://github.com/rapidsai/dataloaders/tree/main/pytorch/batch_dataloader
It should hopefully be ready to go; I've tested it with both base PyTorch and with Ignite, but more eyes on it would definitely be beneficial, particularly in use cases beyond tabular like text or small images. It should be applicable to anyone who isn't doing large images or a lot of image augmentation. It has undergone an internal (NVIDIA) review by @ptrblck, who was immensely helpful in refining it, and @ngimel, who reviewed the codebase and had helpful suggestions regarding memory pinning.
I'm happy to work with the team to create test cases similar to those for dataset and dataloader and would love feedback on it.
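To make the intended usage concrete, here is a minimal sketch (tensor shapes are made up, and it assumes `batch_dataset.py` and `batch_dataloader.py` are importable as modules):

```python
import torch
from batch_dataset import TensorBatchDataset
from batch_dataloader import BatchDataLoader

features = torch.randn(100000, 32)
labels = torch.randint(0, 2, (100000,))

# Each batch is a single contiguous slice; shuffling happens up front each epoch
dataset = TensorBatchDataset([features, labels], batch_size=4096)
loader = BatchDataLoader(dataset, shuffle=True)

for feature_batch, label_batch in loader:
    pass  # training step goes here
```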
## Alternatives
One possible solution to the CUDA-init-before-fork issue is to spawn workers instead; however, as seen on Windows, this is significantly slower, and I had trouble getting it working.
## Additional context
I'm also working on versions of this that work with larger-than-CPU-memory datasets, and on a version that works in GPU memory, doing a zero-copy transform of a RAPIDS cuDF dataframe via DLPack.
| 0 |
rapidsai_public_repos/deeplearning/pytorch | rapidsai_public_repos/deeplearning/pytorch/batch_dataloader/batch_dataloader.py | import torch
from torch import _utils
class BatchDataLoader(object):
"""Batch Data loader. Takes in a batch dataset and returns iterators that return whole batches of data.
Arguments:
batchdataset (BatchDataset): dataset from which to load the data.
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
pin_memory (bool, optional): If ``True``, the data loader will copy tensors
into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
"""
def __init__(self, batchdataset, shuffle=False,
pin_memory=False, drop_last=False):
self.batchdataset = batchdataset
self.batch_size = batchdataset.batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
def __iter__(self):
return _BatchDataLoaderIter(self)
def __len__(self):
if self.drop_last and self.batchdataset.num_samples%self.batch_size != 0:
return len(self.batchdataset)-1
else:
return len(self.batchdataset)
class _BatchDataLoaderIter(object):
"""Iterates once over the BatchDataLoader's batchdataset, shuffling if requested"""
def __init__(self, loader):
self.batchdataset = loader.batchdataset
self.batch_size = loader.batch_size
self.pin_memory = loader.pin_memory and torch.cuda.is_available()
self.drop_last = loader.drop_last
if loader.shuffle:
self.batchdataset.shuffle()
self.idx = 0
def __len__(self):
if self.drop_last and self.batchdataset.num_samples%self.batch_size != 0:
return len(self.batchdataset)-1
else:
return len(self.batchdataset)
def __next__(self):
if self.idx >= len(self):
raise StopIteration
batch = self.batchdataset[self.idx]
# Note Pinning memory was ~10% _slower_ for the test examples I explored
if self.pin_memory:
batch = _utils.pin_memory.pin_memory_batch(batch)
self.idx = self.idx+1
return batch
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/WSDM2021/README.md | # Using Deep Learning to Win the Booking.com WSDM WebTour21 Challenge on Sequential Recommendations
This content was moved to a new [competition repository](https://github.com/NVIDIA-Merlin/competitions).
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/cmake-format-rapids-cmake.json | {
"parse": {
"additional_commands": {
"rapids_cmake_build_type": {
"pargs": {
"nargs": 1
}
},
"rapids_cmake_install_lib_dir": {
"pargs": {
"nargs": 1,
"flags": ["MODIFY_INSTALL_LIBDIR"]
}
},
"rapids_cmake_make_global": {
"pargs": {
"nargs": 1
}
},
"rapids_cmake_parse_version": {
"pargs": {
"nargs": 3
}
},
"rapids_cmake_policy": {
"pargs": {
"nargs": 0
},
"kwargs": {
"DEPRECATED_IN": 1,
"REMOVED_IN": 1,
"MESSAGE": 1
}
},
"rapids_cmake_support_conda_env": {
"pargs": {
"nargs": 1,
"flags": ["MODIFY_PREFIX_PATH"]
}
},
"rapids_cmake_write_git_revision_file": {
"pargs": {
"nargs": 2
},
"kwargs": {
"PREFIX": 1
}
},
"rapids_cmake_write_version_file": {
"pargs": {
"nargs": 1
},
"kwargs": {
"PREFIX": 1
}
},
"rapids_cpm_find": {
"pargs": {
"nargs": "2+"
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1,
"GLOBAL_TARGETS": "+",
"CPM_ARGS": "+",
"DOWNLOAD_ONLY": "1",
"EXCLUDE_FROM_ALL": "1",
"GIT_REPOSITORY": "1",
"GIT_TAG": "1",
"GIT_SHALLOW": "1",
"OPTIONS": "+",
"PATCH_COMMAND": "1"
}
},
"rapids_cpm_init": {
"pargs": {
"nargs": 0
},
"kwargs": {
"OVERRIDE": 1
}
},
"rapids_cpm_package_override": {
"pargs": {
"nargs": 1
}
},
"rapids_cpm_cuco": {
"pargs": {
"nargs": 0
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cpm_fmt": {
"pargs": {
"nargs": 0
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cpm_gbench": {
"pargs": {
"nargs": 0,
"flags": ["BUILD_STATIC"]
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cpm_gtest": {
"pargs": {
"nargs": 0
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cpm_libcudacxx": {
"pargs": {
"nargs": 0
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cpm_nvbench": {
"pargs": {
"nargs": 0,
"flags": ["BUILD_STATIC"]
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cpm_nvcomp": {
"pargs": {
"nargs": 0
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1,
"USE_PROPRIETARY_BINARY": 1
}
},
"rapids_cpm_rmm": {
"pargs": {
"nargs": 0
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cpm_spdlog": {
"pargs": {
"nargs": 0
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"FMT_OPTION": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cpm_thrust": {
"pargs": {
"nargs": 2
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1
}
},
"rapids_cuda_init_architectures": {
"pargs": {
"nargs": 1
}
},
"rapids_cuda_init_runtime": {
"pargs": {
"nargs": 2
}
},
"rapids_cuda_set_architectures": {
"pargs": {
"nargs": 1
}
},
"rapids_cuda_set_runtime": {
"pargs": {
"nargs": 3
}
},
"rapids_export_cpm": {
"pargs": {
"nargs": "3+",
"flags": ["INSTALL", "BUILD"]
},
"kwargs": {
"GLOBAL_TARGETS": "+",
"CPM_ARGS": "+"
}
},
"rapids_export": {
"pargs": {
"nargs": "2+",
"flags": ["INSTALL", "BUILD"]
},
"kwargs": {
"EXPORT_SET": 1,
"NAMESPACE": 1,
"DOCUMENTATION": 1,
"FINAL_CODE_BLOCK": 1,
"VERSION": 1,
"GLOBAL_TARGETS": "+",
"COMPONENTS": "+",
"COMPONENTS_EXPORT_SET": "+",
"LANGUAGES": "+"
}
},
"rapids_export_find_package_file": {
"pargs": {
"nargs": "3+",
"flags": ["INSTALL", "BUILD"]
},
"kwargs": {
"EXPORT_SET": 1,
"CONDITION": 1
}
},
"rapids_export_find_package_root": {
"pargs": {
"nargs": "3+",
"flags": ["INSTALL", "BUILD"]
},
"kwargs": {
"EXPORT_SET": 1,
"CONDITION": 1
}
},
"rapids_export_package": {
"pargs": {
"nargs": "1+"
},
"kwargs": {
"GLOBAL_TARGETS": "+",
"INSTALL": 2,
"BUILD": 2
}
},
"rapids_export_write_dependencies": {
"pargs": {
"nargs": 3,
"flags": ["INSTALL", "BUILD"]
}
},
"rapids_export_write_language": {
"pargs": {
"nargs": 3,
"flags": ["INSTALL", "BUILD"]
}
},
"rapids_find_generate_module": {
"pargs": {
"nargs": "1+",
"flags": [
"NO_CONFIG"
]
},
"kwargs": {
"VERSION": 1,
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1,
"HEADER_NAMES": "+",
"LIBRARY_NAMES": "+",
"INCLUDE_SUFFIXES": "+"
}
},
"rapids_find_package": {
"pargs": {
"nargs": "1+",
"flags": [
"REQUIRED"
]
},
"kwargs": {
"BUILD_EXPORT_SET": 1,
"INSTALL_EXPORT_SET": 1,
"GLOBAL_TARGETS": "+",
"FIND_ARGS": "+"
}
},
"rapids_cython_init": {
"pargs": {
"nargs": "0"
}
},
"rapids_cython_create_modules": {
"pargs": {
"nargs": "0"
},
"kwargs": {
"SOURCE_FILES": "*",
"LINKED_LIBRARIES": "*",
"INSTALL_DIR": "1"
}
},
"rapids_cython_add_rpath_entries": {
"pargs": {
"nargs": "0"
},
"kwargs": {
"PATHS": "+",
"TARGET": "1",
"ROOT_DIRECTORY": "1"
}
},
"rapids_test_init": {
"pargs": {
"nargs": "0"
}
},
"rapids_test_add": {
"pargs": {
"nargs": "0"
},
"kwargs": {
"NAME": "1",
"COMMAND": "*",
"INSTALL_COMPONENT_SET": "1",
"GPUS": "1",
"PERCENT": "1",
"WORKING_DIRECTORY": "1"
}
},
"rapids_test_gpu_requirements": {
"pargs": {
"nargs": "1"
},
"kwargs": {
"GPUS": "1",
"PERCENT": "1"
}
},
"rapids_test_generate_resource_spec": {
"pargs": {
"nargs": "2"
}
},
"rapids_test_install_relocatable": {
"pargs": {
"nargs": "0",
"flags": ["INCLUDE_IN_ALL"]
},
"kwargs": {
"INSTALL_COMPONENT_SET": "1",
"DESTINATION": "1"
}
}
}
}
}
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/.pre-commit-config.yaml | # Copyright (c) 2023, NVIDIA CORPORATION.
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: trailing-whitespace
exclude: |
(?x)^(
^rapids-cmake/cpm/patches/.*
)
- id: end-of-file-fixer
exclude: |
(?x)^(
^rapids-cmake/cpm/patches/.*
)
- id: check-json
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v16.0.6
hooks:
- id: clang-format
types_or: [c, c++, cuda]
args: ["-fallback-style=none", "-style=file", "-i"]
- repo: https://github.com/codespell-project/codespell
rev: v2.2.2
hooks:
- id: codespell
- repo: local
hooks:
- id: copyright-check
name: copyright-check
entry: python ./ci/checks/copyright.py --git-modified-only --update-current-year
language: python
pass_filenames: false
additional_dependencies: [gitpython]
- id: cmake-format
name: cmake-format
entry: ./ci/checks/run-cmake-format.sh cmake-format
language: python
types: [cmake]
# Note that pre-commit autoupdate does not update the versions
# of dependencies, so we'll have to update this manually.
additional_dependencies:
- cmakelang==0.6.13
verbose: true
require_serial: true
files: |
(?x)^(
^rapids-cmake/.*$
)
- id: cmake-lint
name: cmake-lint
entry: ./ci/checks/run-cmake-format.sh cmake-lint
language: python
types: [cmake]
# Note that pre-commit autoupdate does not update the versions
# of dependencies, so we'll have to update this manually.
additional_dependencies:
- cmakelang==0.6.13
verbose: true
require_serial: true
files: |
(?x)^(
^rapids-cmake/.*$
)
default_language_version:
python: python3
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/setup.cfg | # Copyright (c) 2023, NVIDIA CORPORATION.
[codespell]
# note: pre-commit passes explicit lists of files here, which this skip file list doesn't override -
# this is only to allow you to run codespell interactively
skip = ./.git,./.github
# ignore short words, and typename parameters like OffsetT
ignore-regex = \b(.{1,4}|[A-Z]\w*T)\b
builtin = clear
quiet-level = 3
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/CMakeLists.txt | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
#
# This is the legacy entry point for projects using rapids-cmake.
#
# This will setup the following variables in the parent directory
# - CMAKE_MODULE_PATH
# - rapids-cmake-dir
#
# This is considered legacy as it has issues when multiple projects
# use rapids-cmake via CPM inside the same global project. In those
# cases it can fail due to CMAKE_MODULE_PATH not being exported properly
# Enforce the minimum required CMake version for all users
cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR)
set(rapids-cmake-dir "${CMAKE_CURRENT_LIST_DIR}/rapids-cmake")
if(NOT DEFINED CACHE{rapids-cmake-dir})
set(rapids-cmake-dir "${rapids-cmake-dir}" CACHE INTERNAL "" FORCE)
endif()
if(NOT "${rapids-cmake-dir}" IN_LIST CMAKE_MODULE_PATH)
list(APPEND CMAKE_MODULE_PATH "${rapids-cmake-dir}")
endif()
# Propagate up the rapids-cmake version
include("${rapids-cmake-dir}/rapids-version.cmake")
set(rapids-cmake-version ${rapids-cmake-version} PARENT_SCOPE)
# install a hook that sets up `rapids-cmake-dir` and `CMAKE_MODULE_PATH` all the way up the
# call-stack
cmake_language(DEFER DIRECTORY ${CMAKE_CURRENT_LIST_DIR} CALL include
"${rapids-cmake-dir}/../init.cmake")
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/RAPIDS.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
#
# This is the preferred entry point for projects using rapids-cmake
#
# Allow users to control which version is used
if(NOT rapids-cmake-version)
# Define a default version if the user doesn't set one
set(rapids-cmake-version 24.02)
endif()
# Allow users to control which GitHub repo is fetched
if(NOT rapids-cmake-repo)
# Define a default repo if the user doesn't set one
set(rapids-cmake-repo rapidsai/rapids-cmake)
endif()
# Allow users to control which branch is fetched
if(NOT rapids-cmake-branch)
# Define a default branch if the user doesn't set one
set(rapids-cmake-branch "branch-${rapids-cmake-version}")
endif()
# Allow users to control the exact URL passed to FetchContent
if(NOT rapids-cmake-url)
# Construct a default URL if the user doesn't set one
set(rapids-cmake-url "https://github.com/${rapids-cmake-repo}/")
# In order of specificity
if(rapids-cmake-sha)
# An exact git SHA takes precedence over anything
string(APPEND rapids-cmake-url "archive/${rapids-cmake-sha}.zip")
elseif(rapids-cmake-tag)
# Followed by a git tag name
string(APPEND rapids-cmake-url "archive/refs/tags/${rapids-cmake-tag}.zip")
else()
# Or if neither of the above two were defined, use a branch
string(APPEND rapids-cmake-url "archive/refs/heads/${rapids-cmake-branch}.zip")
endif()
endif()
if(POLICY CMP0135)
cmake_policy(PUSH)
cmake_policy(SET CMP0135 NEW)
endif()
include(FetchContent)
FetchContent_Declare(rapids-cmake URL "${rapids-cmake-url}")
if(POLICY CMP0135)
cmake_policy(POP)
endif()
FetchContent_GetProperties(rapids-cmake)
if(rapids-cmake_POPULATED)
# Something else has already populated rapids-cmake, only thing
# we need to do is setup the CMAKE_MODULE_PATH
if(NOT "${rapids-cmake-dir}" IN_LIST CMAKE_MODULE_PATH)
list(APPEND CMAKE_MODULE_PATH "${rapids-cmake-dir}")
endif()
else()
FetchContent_MakeAvailable(rapids-cmake)
endif()
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/README.md | # <div align="left"><img src="https://rapids.ai/assets/images/rapids_logo.png" width="90px"/> rapids-cmake</div>
**NOTE:** For the latest stable [README.md](https://github.com/rapidsai/rapids-cmake/blob/main/README.md) ensure you are on the `main` branch.
## Overview
This is a collection of CMake modules that are useful for all CUDA RAPIDS
projects. By sharing the code in a single place it makes rolling out CMake
fixes easier.
## Installation
The `rapids-cmake` module is designed to be acquired via CMake's [Fetch
Content](https://cmake.org/cmake/help/latest/module/FetchContent.html) into your project.
```cmake
cmake_minimum_required(...)
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/<PROJECT>_RAPIDS.cmake)
file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-<VERSION_MAJOR>.<VERSION_MINOR>/RAPIDS.cmake
${CMAKE_CURRENT_BINARY_DIR}/<PROJECT>_RAPIDS.cmake)
endif()
include(${CMAKE_CURRENT_BINARY_DIR}/<PROJECT>_RAPIDS.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-cuda)
include(rapids-export)
include(rapids-find)
project(....)
```
Note that we recommend you install `rapids-cmake` into the root `CMakeLists.txt` of
your project before the first `project` call. This allows us to offer features such as
`rapids_cuda_init_architectures()`.
## Usage
`rapids-cmake` provides a collection of useful CMake settings that any RAPIDS project may use.
While they may be common, we know that they aren't universal and might need to be composed in
different ways.
To use the functions provided by `rapids-cmake`, projects have two options:
- Call `include(rapids-<component>)` as that imports all commonly used functions for that component
- Load each function independently via `include(${rapids-cmake-dir}/<component>/<function_name>.cmake)`
## Components
Complete online documentation for all components can be found at:
https://docs.rapids.ai/api/rapids-cmake/nightly/api.html
### cmake
The `rapids-cmake` module contains helpful general CMake functionality; a short sketch follows the list below.
- `rapids_cmake_build_type( )` handles initialization of `CMAKE_BUILD_TYPE`
- `rapids_cmake_support_conda_env( target [MODIFY_PREFIX_PATH])` Establishes a target that holds the CONDA environment include and link directories.
- `rapids_cmake_write_version_file( <file> )` Writes a C++ header with a project's MAJOR, MINOR, and PATCH defines
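A minimal sketch of the above (illustrative only; it assumes it runs after `project()`, and the file name is a placeholder):

```cmake
include(rapids-cmake)

# Default to Release when the user hasn't set CMAKE_BUILD_TYPE
rapids_cmake_build_type(Release)

# Write a header with the project's MAJOR/MINOR/PATCH version defines
rapids_cmake_write_version_file(${CMAKE_CURRENT_BINARY_DIR}/my_project_version.hpp)
```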
### cpm
The `rapids-cpm` module contains CPM functionality to allow projects to acquire dependencies consistently.
For consistency, all targets brought in via `rapids-cpm` are GLOBAL targets; a short sketch follows the list below.
- `rapids_cpm_init()` handles initialization of the CPM module.
- `rapids_cpm_find(<project> name BUILD_EXPORT_SET <name> INSTALL_EXPORT_SET <name>)` Will search for a package and fall back to installing it via CPM. Offers support to track dependencies for easy package exporting
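A minimal sketch (the package, version, and export-set names here are only illustrative):

```cmake
include(rapids-cpm)
rapids_cpm_init()

# Reuse an existing spdlog if one is found, otherwise download and build it via
# CPM, recording the dependency in both export sets for rapids_export()
rapids_cpm_find(spdlog 1.11.0
  BUILD_EXPORT_SET my-project-exports
  INSTALL_EXPORT_SET my-project-exports
  CPM_ARGS
    GIT_REPOSITORY https://github.com/gabime/spdlog.git
    GIT_TAG v1.11.0
    GIT_SHALLOW TRUE
)
```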
### cuda
The `rapids-cuda` module contains core functionality to allow projects to build CUDA code robustly.
The most commonly used functions are (see the sketch after this list):
- `rapids_cuda_init_architectures(<project_name>)` handles initialization of `CMAKE_CUDA_ARCHITECTURE`. MUST BE CALLED BEFORE `PROJECT()`
- `rapids_cuda_init_runtime(<mode>)` handles initialization of `CMAKE_CUDA_RUNTIME_LIBRARY`.
- `rapids_cuda_patch_toolkit()` corrects bugs in the CUDAToolkit module that are being upstreamed.
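A minimal sketch (the project name is a placeholder):

```cmake
include(rapids-cuda)

# Must come before the first project() call
rapids_cuda_init_architectures(my_project)
project(my_project LANGUAGES CXX CUDA)

# Statically link the CUDA runtime into all CUDA targets
rapids_cuda_init_runtime(USE_STATIC ON)
```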
### cython
The `rapids_cython` functions allow projects to easily build cython modules using
[scikit-build](https://scikit-build.readthedocs.io/en/latest/).
- `rapids_cython_init()` handles initialization of scikit-build and cython.
- `rapids_cython_create_modules([CXX] [SOURCE_FILES <src1> <src2> ...] [LINKED_LIBRARIES <lib1> <lib2> ... ] [INSTALL_DIR <install_path>] [MODULE_PREFIX <module_prefix>] )` will create cython modules for each provided source file (see the sketch below)
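A minimal sketch (the source file and linked library are placeholders):

```cmake
include(rapids-cython)
rapids_cython_init()

# Build one Python extension module per listed .pyx file
rapids_cython_create_modules(
  SOURCE_FILES my_module.pyx
  LINKED_LIBRARIES my_project::my_project
)
```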
### export
The `rapids-export` module contains core functionality to allow projects to easily record and write out
build and install dependencies that come from `find_package` or `cpm`; a short sketch follows below.
- `rapids_export(<type> <project> EXPORT_SET <name>)` writes out all the required components of a project's config module so that the `install` or `build` directory can be imported via `find_package`. See the `rapids_export` documentation for full details
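A minimal sketch (project and export-set names are placeholders; it assumes the project's targets were already installed into the export set):

```cmake
include(rapids-export)

# Generate and install my_project-config.cmake so consumers can
# simply call find_package(my_project)
rapids_export(INSTALL my_project
  EXPORT_SET my_project-exports
  GLOBAL_TARGETS my_project
  NAMESPACE my_project::
)
```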
### find
The `rapids-find` module contains core functionality to allow projects to easily generate FindModule
or export `find_package` calls:
The most commonly used functions are (see the sketch after this list):
- `rapids_find_package(<project_name> BUILD_EXPORT_SET <name> INSTALL_EXPORT_SET <name> )` Combines `find_package` and support to track dependencies for easy package exporting
- `rapids_find_generate_module(<PackageName> HEADER_NAMES <paths...> LIBRARY_NAMES <names...> )` Generate a FindModule for the given package. Allows association to export sets so the generated FindModule can be shipped with the project
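A minimal sketch (the export-set name and generated module are placeholders):

```cmake
include(rapids-find)

# find_package(ZLIB) and record it so rapids_export() re-finds it for consumers
rapids_find_package(ZLIB
  BUILD_EXPORT_SET my-project-exports
  INSTALL_EXPORT_SET my-project-exports
)

# Generate FindExample.cmake for a package that lacks its own CMake config
rapids_find_generate_module(Example
  HEADER_NAMES example.hpp
  LIBRARY_NAMES example
)
```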
### test
The `rapids_test` functions simplify CTest resource allocation, allowing for
tests to run in parallel without overallocating GPU resources.
The most commonly used functions are (see the sketch after this list):
- `rapids_test_add(NAME <test_name> GPUS <N> PERCENT <N>)`: State how many GPU resources a single
test requires
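A minimal sketch (target and file names are placeholders):

```cmake
include(rapids-test)

enable_testing()
rapids_test_init()

add_executable(my_gpu_test test.cu)
# Declare that this test needs 50% of one GPU, so compatible tests can share a device
rapids_test_add(NAME my_gpu_test COMMAND my_gpu_test GPUS 1 PERCENT 50)
```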
## Overriding RAPIDS.cmake
At times projects or developers will need to verify ``rapids-cmake`` branches. To do this you can set variables that control which repository ``RAPIDS.cmake`` downloads, which should be done like this:
```cmake
# To override the version that is pulled:
set(rapids-cmake-version "<version>")
# To override the GitHub repository:
set(rapids-cmake-repo "<my_fork>")
# To use an exact Git SHA:
set(rapids-cmake-sha "<my_git_sha>")
# To use a Git tag:
set(rapids-cmake-tag "<my_git_tag>")
# To override the repository branch:
set(rapids-cmake-branch "<my_feature_branch>")
# Or to override the entire repository URL (e.g. to use a GitLab repo):
set(rapids-cmake-url "https://gitlab.com/<my_user>/<my_fork>/-/archive/<my_branch>/<my_fork>-<my_branch>.zip")
file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-22.10/RAPIDS.cmake
${CMAKE_CURRENT_BINARY_DIR}/RAPIDS.cmake)
include(${CMAKE_CURRENT_BINARY_DIR}/RAPIDS.cmake)
```
A few notes:
- An explicitly defined ``rapids-cmake-url`` will always be used
- `rapids-cmake-sha` takes precedence over `rapids-cmake-tag`
- `rapids-cmake-tag` takes precedence over `rapids-cmake-branch`
- It is advised to always set `rapids-cmake-version` to the version expected by the repo your modifications will pull
## Contributing
Review the [CONTRIBUTING.md](https://github.com/rapidsai/rapids-cmake/blob/main/CONTRIBUTING.md) file for information on how to contribute code and issues to the project.
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/CHANGELOG.md | # rapids-cmake 23.10.00 (11 Oct 2023)
## 🐛 Bug Fixes
- Quote the list of patch files in case they have spaces in their paths ([#463](https://github.com/rapidsai/rapids-cmake/pull/463)) [@ericniebler](https://github.com/ericniebler)
- cpm overrides don't occur when `CPM_<pkg>_SOURCE` exists ([#458](https://github.com/rapidsai/rapids-cmake/pull/458)) [@robertmaynard](https://github.com/robertmaynard)
- Use `conda mambabuild` not `mamba mambabuild` ([#457](https://github.com/rapidsai/rapids-cmake/pull/457)) [@bdice](https://github.com/bdice)
- Support fmt use in debug builds ([#456](https://github.com/rapidsai/rapids-cmake/pull/456)) [@robertmaynard](https://github.com/robertmaynard)
## 📖 Documentation
- Move rapids_cpm_package_override to CPM section of docs ([#462](https://github.com/rapidsai/rapids-cmake/pull/462)) [@robertmaynard](https://github.com/robertmaynard)
- Improve docs around fetch content and rapids-cmake overrides ([#444](https://github.com/rapidsai/rapids-cmake/pull/444)) [@robertmaynard](https://github.com/robertmaynard)
## 🚀 New Features
- Bump cuco version ([#452](https://github.com/rapidsai/rapids-cmake/pull/452)) [@PointKernel](https://github.com/PointKernel)
## 🛠️ Improvements
- Update image names ([#461](https://github.com/rapidsai/rapids-cmake/pull/461)) [@AyodeAwe](https://github.com/AyodeAwe)
- Update to CPM v0.38.5 ([#460](https://github.com/rapidsai/rapids-cmake/pull/460)) [@trxcllnt](https://github.com/trxcllnt)
- Update to clang 16.0.6. ([#459](https://github.com/rapidsai/rapids-cmake/pull/459)) [@bdice](https://github.com/bdice)
- Use `copy-pr-bot` ([#455](https://github.com/rapidsai/rapids-cmake/pull/455)) [@ajschmidt8](https://github.com/ajschmidt8)
# rapids-cmake 23.08.00 (9 Aug 2023)
## 🐛 Bug Fixes
- Use < gcc-11 with cuda 11.5 to avoid nvbench compile failures ([#448](https://github.com/rapidsai/rapids-cmake/pull/448)) [@robertmaynard](https://github.com/robertmaynard)
- Ensure tests the modify same git repo don't execute at the same time ([#446](https://github.com/rapidsai/rapids-cmake/pull/446)) [@robertmaynard](https://github.com/robertmaynard)
- Fix CUDA 11.5 tests by adding dependencies entries. ([#443](https://github.com/rapidsai/rapids-cmake/pull/443)) [@bdice](https://github.com/bdice)
- Remove trailing comma and add pre-commit hook for JSON validation. ([#440](https://github.com/rapidsai/rapids-cmake/pull/440)) [@bdice](https://github.com/bdice)
- When nvcomp is found locally print where it is on disk ([#434](https://github.com/rapidsai/rapids-cmake/pull/434)) [@robertmaynard](https://github.com/robertmaynard)
- Correct two issues found when testing CMake 3.27 rc2 ([#432](https://github.com/rapidsai/rapids-cmake/pull/432)) [@robertmaynard](https://github.com/robertmaynard)
- Correct re-root controls from conda-forge with thrust/cub/etc ([#431](https://github.com/rapidsai/rapids-cmake/pull/431)) [@robertmaynard](https://github.com/robertmaynard)
- Bug/proprietary binary obeys `always_download` ([#430](https://github.com/rapidsai/rapids-cmake/pull/430)) [@robertmaynard](https://github.com/robertmaynard)
- Correct install_relocatable issues found by libcudf ([#423](https://github.com/rapidsai/rapids-cmake/pull/423)) [@robertmaynard](https://github.com/robertmaynard)
- test_install_relocatable correct run_gpu_test.cmake location ([#420](https://github.com/rapidsai/rapids-cmake/pull/420)) [@robertmaynard](https://github.com/robertmaynard)
- Fea/move to latest nvbench ([#417](https://github.com/rapidsai/rapids-cmake/pull/417)) [@robertmaynard](https://github.com/robertmaynard)
- Use `@loader_path` instead of `$ORIGIN` on MacOS ([#403](https://github.com/rapidsai/rapids-cmake/pull/403)) [@manopapad](https://github.com/manopapad)
- Make NAMESPACE property truly optional in rapids_export ([#358](https://github.com/rapidsai/rapids-cmake/pull/358)) [@agirault](https://github.com/agirault)
## 🚀 New Features
- Update rapids-cmake ci to support conda-forge CUDA 12 ([#437](https://github.com/rapidsai/rapids-cmake/pull/437)) [@robertmaynard](https://github.com/robertmaynard)
- Bump cuco version ([#435](https://github.com/rapidsai/rapids-cmake/pull/435)) [@PointKernel](https://github.com/PointKernel)
- Add rapids_cuda_set_runtime ([#429](https://github.com/rapidsai/rapids-cmake/pull/429)) [@robertmaynard](https://github.com/robertmaynard)
- support_conda_env support host and build CTK 12 locations ([#428](https://github.com/rapidsai/rapids-cmake/pull/428)) [@robertmaynard](https://github.com/robertmaynard)
- rapids_find_generate_module Support user code blocks ([#415](https://github.com/rapidsai/rapids-cmake/pull/415)) [@robertmaynard](https://github.com/robertmaynard)
- Rewrite of rapids_test_install_relocatable to support genex expressions ([#410](https://github.com/rapidsai/rapids-cmake/pull/410)) [@robertmaynard](https://github.com/robertmaynard)
## 🛠️ Improvements
- Conditionally modify envvar vs. global CMAKE_PREFIX_PATH in `rapids_cmake_support_conda_env` ([#439](https://github.com/rapidsai/rapids-cmake/pull/439)) [@trxcllnt](https://github.com/trxcllnt)
- Migrate to updated shared-action-workflows name for CUDA 12 CI ([#438](https://github.com/rapidsai/rapids-cmake/pull/438)) [@bdice](https://github.com/bdice)
- Fix google benchmark name and update version ([#425](https://github.com/rapidsai/rapids-cmake/pull/425)) [@vyasr](https://github.com/vyasr)
- use rapids-upload-docs script ([#419](https://github.com/rapidsai/rapids-cmake/pull/419)) [@AyodeAwe](https://github.com/AyodeAwe)
- Remove documentation build scripts for Jenkins ([#418](https://github.com/rapidsai/rapids-cmake/pull/418)) [@ajschmidt8](https://github.com/ajschmidt8)
- Upload conda packages for rapids_core_dependencies. ([#414](https://github.com/rapidsai/rapids-cmake/pull/414)) [@bdice](https://github.com/bdice)
# rapids-cmake 23.06.00 (7 Jun 2023)
## 🚨 Breaking Changes
- Using deprecated CUDA_ARCHITECTURE values now produces an error. ([#397](https://github.com/rapidsai/rapids-cmake/pull/397)) [@robertmaynard](https://github.com/robertmaynard)
- rapids_cpm cccl packages cmake files are now relocated to not clash with upstream ([#393](https://github.com/rapidsai/rapids-cmake/pull/393)) [@robertmaynard](https://github.com/robertmaynard)
## 🐛 Bug Fixes
- Revert "Define Cython language_level explicitly. ([#394)" (#396](https://github.com/rapidsai/rapids-cmake/pull/394)" (#396)) [@vyasr](https://github.com/vyasr)
- rapids_cpm cccl packages cmake files are now relocated to not clash with upstream ([#393](https://github.com/rapidsai/rapids-cmake/pull/393)) [@robertmaynard](https://github.com/robertmaynard)
## 📖 Documentation
- Correct basics to api cross refs ([#405](https://github.com/rapidsai/rapids-cmake/pull/405)) [@robertmaynard](https://github.com/robertmaynard)
## 🚀 New Features
- Update cuco git tag to support `cuco::static_set` ([#407](https://github.com/rapidsai/rapids-cmake/pull/407)) [@PointKernel](https://github.com/PointKernel)
- Upgrade GTest version to 1.13 ([#401](https://github.com/rapidsai/rapids-cmake/pull/401)) [@robertmaynard](https://github.com/robertmaynard)
- Using deprecated CUDA_ARCHITECTURE values now produces an error. ([#397](https://github.com/rapidsai/rapids-cmake/pull/397)) [@robertmaynard](https://github.com/robertmaynard)
## 🛠️ Improvements
- run docs nightly too ([#413](https://github.com/rapidsai/rapids-cmake/pull/413)) [@AyodeAwe](https://github.com/AyodeAwe)
- Update cuco git tag to fetch several bug fixes ([#412](https://github.com/rapidsai/rapids-cmake/pull/412)) [@PointKernel](https://github.com/PointKernel)
- Revert shared workflows branch ([#406](https://github.com/rapidsai/rapids-cmake/pull/406)) [@ajschmidt8](https://github.com/ajschmidt8)
- Upgrade to Python 3.9 (drop Python 3.8) ([#404](https://github.com/rapidsai/rapids-cmake/pull/404)) [@shwina](https://github.com/shwina)
- Remove usage of rapids-get-rapids-version-from-git ([#402](https://github.com/rapidsai/rapids-cmake/pull/402)) [@jjacobelli](https://github.com/jjacobelli)
- Update clang-format ([#398](https://github.com/rapidsai/rapids-cmake/pull/398)) [@bdice](https://github.com/bdice)
- Define Cython language_level explicitly. ([#394](https://github.com/rapidsai/rapids-cmake/pull/394)) [@bdice](https://github.com/bdice)
# rapids-cmake 23.04.00 (6 Apr 2023)
## 🐛 Bug Fixes
- install_relocatable only installs files that exist ([#392](https://github.com/rapidsai/rapids-cmake/pull/392)) [@robertmaynard](https://github.com/robertmaynard)
- Revert "install tests environment properties ([#390)" (#391](https://github.com/rapidsai/rapids-cmake/pull/390)" (#391)) [@robertmaynard](https://github.com/robertmaynard)
- Add `COMPONENT` arguments for rapids_export to formatting file. ([#389](https://github.com/rapidsai/rapids-cmake/pull/389)) [@robertmaynard](https://github.com/robertmaynard)
- install_relocatable generate correct installed RESOURCE_SPEC_FILE ([#386](https://github.com/rapidsai/rapids-cmake/pull/386)) [@robertmaynard](https://github.com/robertmaynard)
- support_conda_env only add rpath-link flag to linkers that support it. ([#384](https://github.com/rapidsai/rapids-cmake/pull/384)) [@robertmaynard](https://github.com/robertmaynard)
- rapids_cpm_nvbench properly specify usage of external fmt library ([#376](https://github.com/rapidsai/rapids-cmake/pull/376)) [@robertmaynard](https://github.com/robertmaynard)
- rapids_cpm_spdlog properly specify usage of external fmt library ([#375](https://github.com/rapidsai/rapids-cmake/pull/375)) [@robertmaynard](https://github.com/robertmaynard)
- Patch nvbench to allow usage of external fmt ([#373](https://github.com/rapidsai/rapids-cmake/pull/373)) [@robertmaynard](https://github.com/robertmaynard)
- Support static builds of fmt ([#372](https://github.com/rapidsai/rapids-cmake/pull/372)) [@robertmaynard](https://github.com/robertmaynard)
- Update to latest nvbench ([#371](https://github.com/rapidsai/rapids-cmake/pull/371)) [@vyasr](https://github.com/vyasr)
## 📖 Documentation
- Fix misspelling of rapids_cpm_init ([#385](https://github.com/rapidsai/rapids-cmake/pull/385)) [@dagardner-nv](https://github.com/dagardner-nv)
## 🚀 New Features
- rapids_test_install_relocatable tracks tests environment properties ([#390](https://github.com/rapidsai/rapids-cmake/pull/390)) [@robertmaynard](https://github.com/robertmaynard)
- rapids_test_install_relocatable EXCLUDE_FROM_ALL is now the default ([#388](https://github.com/rapidsai/rapids-cmake/pull/388)) [@robertmaynard](https://github.com/robertmaynard)
- Support downloading nvcomp CTK 11 or 12 binaries ([#381](https://github.com/rapidsai/rapids-cmake/pull/381)) [@robertmaynard](https://github.com/robertmaynard)
- Introduce clang-format to rapids-cmake to format C++ code examples ([#378](https://github.com/rapidsai/rapids-cmake/pull/378)) [@robertmaynard](https://github.com/robertmaynard)
- proprietary_binary now supports cuda toolkit version placeholders ([#377](https://github.com/rapidsai/rapids-cmake/pull/377)) [@robertmaynard](https://github.com/robertmaynard)
- Add `rapids_test` allowing projects to run gpu tests in parallel ([#328](https://github.com/rapidsai/rapids-cmake/pull/328)) [@robertmaynard](https://github.com/robertmaynard)
- Extend rapids_export to support the concept of optional COMPONENTS ([#154](https://github.com/rapidsai/rapids-cmake/pull/154)) [@robertmaynard](https://github.com/robertmaynard)
## 🛠️ Improvements
- Update to GCC 11 ([#382](https://github.com/rapidsai/rapids-cmake/pull/382)) [@bdice](https://github.com/bdice)
- Make docs builds less verbose ([#380](https://github.com/rapidsai/rapids-cmake/pull/380)) [@AyodeAwe](https://github.com/AyodeAwe)
- Update GHAs Workflows ([#374](https://github.com/rapidsai/rapids-cmake/pull/374)) [@ajschmidt8](https://github.com/ajschmidt8)
- Use trap to handle errors in test scripts ([#370](https://github.com/rapidsai/rapids-cmake/pull/370)) [@AjayThorve](https://github.com/AjayThorve)
- Bump spdlog to 1.11, add fmt as dependency for spdlog ([#368](https://github.com/rapidsai/rapids-cmake/pull/368)) [@kkraus14](https://github.com/kkraus14)
- Clean up and sort CPM packages. ([#366](https://github.com/rapidsai/rapids-cmake/pull/366)) [@bdice](https://github.com/bdice)
- Update shared workflow branches ([#365](https://github.com/rapidsai/rapids-cmake/pull/365)) [@ajschmidt8](https://github.com/ajschmidt8)
- Add fmt 9.1.0 ([#364](https://github.com/rapidsai/rapids-cmake/pull/364)) [@kkraus14](https://github.com/kkraus14)
- Move date to build string in `conda` recipe ([#359](https://github.com/rapidsai/rapids-cmake/pull/359)) [@ajschmidt8](https://github.com/ajschmidt8)
- Add docs build job ([#347](https://github.com/rapidsai/rapids-cmake/pull/347)) [@AyodeAwe](https://github.com/AyodeAwe)
# rapids-cmake 23.02.00 (9 Feb 2023)
## 🐛 Bug Fixes
- Remove incorrect deprecation for CMAKE_CUDA_ARCHITECTURES="NATIVE" ([#355](https://github.com/rapidsai/rapids-cmake/pull/355)) [@robertmaynard](https://github.com/robertmaynard)
- cpm: `always_download` now considers `patches` json entry ([#353](https://github.com/rapidsai/rapids-cmake/pull/353)) [@robertmaynard](https://github.com/robertmaynard)
- Use string literals for policy test messages so no escaping needed ([#351](https://github.com/rapidsai/rapids-cmake/pull/351)) [@robertmaynard](https://github.com/robertmaynard)
- Revert "Update spdlog to 1.11 ( latest version ) ([#342)" (#346](https://github.com/rapidsai/rapids-cmake/pull/342)" (#346)) [@bdice](https://github.com/bdice)
- Revert update of libcudacxx 1.9 ([#337](https://github.com/rapidsai/rapids-cmake/pull/337)) [@robertmaynard](https://github.com/robertmaynard)
- rapids_cuda_patch_toolkit: Better handle non-standard toolkits ([#324](https://github.com/rapidsai/rapids-cmake/pull/324)) [@robertmaynard](https://github.com/robertmaynard)
- Revert "Upgrade spdlog to 1.10.0 ([#312)" (#323](https://github.com/rapidsai/rapids-cmake/pull/312)" (#323)) [@bdice](https://github.com/bdice)
- rapids_cuda_init_architectures now supports CUDAARCHS env variable ([#322](https://github.com/rapidsai/rapids-cmake/pull/322)) [@robertmaynard](https://github.com/robertmaynard)
- Remove usage of FetchContent from tests to improve perf ([#303](https://github.com/rapidsai/rapids-cmake/pull/303)) [@robertmaynard](https://github.com/robertmaynard)
## 🚀 New Features
- Update nvCOMP version to 2.6.1 ([#360](https://github.com/rapidsai/rapids-cmake/pull/360)) [@vuule](https://github.com/vuule)
- cpm: Rework `always_download` rules to be smarter ([#348](https://github.com/rapidsai/rapids-cmake/pull/348)) [@robertmaynard](https://github.com/robertmaynard)
- Add deprecation notice to passing "" to CMAKE_CUDA_ARCHITECTURES ([#345](https://github.com/rapidsai/rapids-cmake/pull/345)) [@robertmaynard](https://github.com/robertmaynard)
- Update to libcudacxx 1.9.1 to have a version >= CUDA Toolkit 12 ([#343](https://github.com/rapidsai/rapids-cmake/pull/343)) [@robertmaynard](https://github.com/robertmaynard)
- Update spdlog to 1.11 ( latest version ) ([#342](https://github.com/rapidsai/rapids-cmake/pull/342)) [@robertmaynard](https://github.com/robertmaynard)
- Update to nvcomp 2.6 ([#341](https://github.com/rapidsai/rapids-cmake/pull/341)) [@robertmaynard](https://github.com/robertmaynard)
- Add deprecation warnings for usage of `ALL` ([#339](https://github.com/rapidsai/rapids-cmake/pull/339)) [@robertmaynard](https://github.com/robertmaynard)
- rapids-cmake now errors out when CPM can't be downloaded ([#335](https://github.com/rapidsai/rapids-cmake/pull/335)) [@robertmaynard](https://github.com/robertmaynard)
- Update to nvcomp 2.5 ([#333](https://github.com/rapidsai/rapids-cmake/pull/333)) [@robertmaynard](https://github.com/robertmaynard)
- Update to libcudacxx 1.9 to match version found in CUDA Toolkit 12 ([#332](https://github.com/rapidsai/rapids-cmake/pull/332)) [@robertmaynard](https://github.com/robertmaynard)
- Update cuco git tag to fetch bug fixes and cleanups ([#329](https://github.com/rapidsai/rapids-cmake/pull/329)) [@PointKernel](https://github.com/PointKernel)
- Fea/support cmake cuda architectures rapids value ([#327](https://github.com/rapidsai/rapids-cmake/pull/327)) [@robertmaynard](https://github.com/robertmaynard)
- Upgrade spdlog to 1.10.0 ([#312](https://github.com/rapidsai/rapids-cmake/pull/312)) [@kkraus14](https://github.com/kkraus14)
## 🛠️ Improvements
- Update shared workflow branches ([#361](https://github.com/rapidsai/rapids-cmake/pull/361)) [@ajschmidt8](https://github.com/ajschmidt8)
- Build against CUDA `11.8` ([#344](https://github.com/rapidsai/rapids-cmake/pull/344)) [@ajschmidt8](https://github.com/ajschmidt8)
- Make generated find module targets global ([#340](https://github.com/rapidsai/rapids-cmake/pull/340)) [@vyasr](https://github.com/vyasr)
- Add codespell and whitespace linters to pre-commit hooks. ([#338](https://github.com/rapidsai/rapids-cmake/pull/338)) [@bdice](https://github.com/bdice)
- Use pre-commit for style checks ([#336](https://github.com/rapidsai/rapids-cmake/pull/336)) [@bdice](https://github.com/bdice)
- Branch 23.02 merge 22.12 ([#331](https://github.com/rapidsai/rapids-cmake/pull/331)) [@vyasr](https://github.com/vyasr)
- Update conda recipes. ([#330](https://github.com/rapidsai/rapids-cmake/pull/330)) [@bdice](https://github.com/bdice)
- Fix typo. ([#311](https://github.com/rapidsai/rapids-cmake/pull/311)) [@vyasr](https://github.com/vyasr)
# rapids-cmake 22.12.00 (8 Dec 2022)
## 🐛 Bug Fixes
- Don't use CMake 3.25.0 as it has a show stopping FindCUDAToolkit bug ([#308](https://github.com/rapidsai/rapids-cmake/pull/308)) [@robertmaynard](https://github.com/robertmaynard)
- Add missing CPM_ARGS to gbench ([#294](https://github.com/rapidsai/rapids-cmake/pull/294)) [@vyasr](https://github.com/vyasr)
- Patch results are only displayed once per invocation of CMake ([#292](https://github.com/rapidsai/rapids-cmake/pull/292)) [@robertmaynard](https://github.com/robertmaynard)
- Add thrust output iterator fix to rapids-cmake thrust patches ([#291](https://github.com/rapidsai/rapids-cmake/pull/291)) [@robertmaynard](https://github.com/robertmaynard)
## 📖 Documentation
- Update pull request template to match rest of RAPIDS ([#280](https://github.com/rapidsai/rapids-cmake/pull/280)) [@robertmaynard](https://github.com/robertmaynard)
- Clarify rapids_cuda_init_architectures behavior ([#279](https://github.com/rapidsai/rapids-cmake/pull/279)) [@robertmaynard](https://github.com/robertmaynard)
## 🚀 New Features
- Update cuco git tag ([#302](https://github.com/rapidsai/rapids-cmake/pull/302)) [@PointKernel](https://github.com/PointKernel)
- Remove old CI files ([#300](https://github.com/rapidsai/rapids-cmake/pull/300)) [@robertmaynard](https://github.com/robertmaynard)
- Update cuco to version that supports Ada and Hopper ([#299](https://github.com/rapidsai/rapids-cmake/pull/299)) [@robertmaynard](https://github.com/robertmaynard)
- Move to libcudacxx 1.8.1 so we support sm90 ([#296](https://github.com/rapidsai/rapids-cmake/pull/296)) [@robertmaynard](https://github.com/robertmaynard)
- Add ability to specify library directories for target rpaths ([#295](https://github.com/rapidsai/rapids-cmake/pull/295)) [@vyasr](https://github.com/vyasr)
- Add support for cloning Google benchmark ([#293](https://github.com/rapidsai/rapids-cmake/pull/293)) [@vyasr](https://github.com/vyasr)
- Add `current_json_dir` placeholder in json patch file values ([#289](https://github.com/rapidsai/rapids-cmake/pull/289)) [@robertmaynard](https://github.com/robertmaynard)
- Add sm90 ( Hopper ) to rapids-cmake "ALL" mode ([#285](https://github.com/rapidsai/rapids-cmake/pull/285)) [@robertmaynard](https://github.com/robertmaynard)
- Enable copy_prs ops-bot config ([#284](https://github.com/rapidsai/rapids-cmake/pull/284)) [@robertmaynard](https://github.com/robertmaynard)
- Add GitHub action workflow to rapids-cmake ([#283](https://github.com/rapidsai/rapids-cmake/pull/283)) [@robertmaynard](https://github.com/robertmaynard)
- Create conda package of patched dependencies ([#275](https://github.com/rapidsai/rapids-cmake/pull/275)) [@robertmaynard](https://github.com/robertmaynard)
- Switch thrust over to use rapids-cmake patches ([#265](https://github.com/rapidsai/rapids-cmake/pull/265)) [@robertmaynard](https://github.com/robertmaynard)
## 🛠️ Improvements
- Remove `rapids-dependency-file-generator` `FIXME` ([#305](https://github.com/rapidsai/rapids-cmake/pull/305)) [@ajschmidt8](https://github.com/ajschmidt8)
- Add `ninja` as build dependency ([#301](https://github.com/rapidsai/rapids-cmake/pull/301)) [@ajschmidt8](https://github.com/ajschmidt8)
- Forward merge 22.10 into 22.12 ([#297](https://github.com/rapidsai/rapids-cmake/pull/297)) [@vyasr](https://github.com/vyasr)
# rapids-cmake 22.10.00 (12 Oct 2022)
## 🚨 Breaking Changes
- Update rapids-cmake to require cmake 3.23.1 (#227) @robertmaynard
- put $PREFIX before $BUILD_PREFIX in conda build (#182) @kkraus14
## 🐛 Bug Fixes
- Update to nvcomp 2.4.1 to fix zstd decompression (#286) @robertmaynard
- Restore rapids_cython_create_modules output variable name (#276) @robertmaynard
- rapids_cuda_init_architectures now obeys CUDAARCHS env variable (#270) @robertmaynard
- Update to Thrust 1.17.2 to fix cub ODR issues (#269) @robertmaynard
- conda_env: pass conda prefix as a rpath-link directory (#263) @robertmaynard
- Update cuCollections to fix issue with INSTALL_CUCO set to OFF. (#261) @bdice
- rapids_cpm_libcudacxx correct location of libcudacxx-config (#258) @robertmaynard
- Update rapids_find_generate_module to cmake 3.23 (#256) @robertmaynard
- Handle reconfiguring with USE_PROPRIETARY_BINARY value differing (#255) @robertmaynard
- rapids_cpm_thrust record build directory location of thrust-config (#254) @robertmaynard
- disable cuco install rules when no INSTALL_EXPORT_SET (#250) @robertmaynard
- Patch thrust and cub install rules to have proper header searches (#244) @robertmaynard
- Ensure that we install Thrust and Cub correctly. (#243) @robertmaynard
- Revert "Update to CPM v0.35.4 for URL downloads... (#236)" (#242) @robertmaynard
- put $PREFIX before $BUILD_PREFIX in conda build (#182) @kkraus14
## 📖 Documentation
- Correct broken patch_toolkit API docs, and CMake API cross references (#271) @robertmaynard
- Provide suggestions when encountering an incomplete GTest package (#247) @robertmaynard
- Docs: RAPIDS.cmake should be placed in current bin dir (#241) @robertmaynard
- Remove incorrect install location note on rapids_export (#232) @robertmaynard
## 🚀 New Features
- Update to CPM 0.35.6 as it has needed changes for cpm patching support. (#273) @robertmaynard
- Update to nvcomp 2.4 which now offers aarch64 binaries! (#272) @robertmaynard
- Support the concept of a patches to apply to a project built via CPM (#264) @robertmaynard
- Branch 22.10 merge 22.08 (#262) @robertmaynard
- Introduce rapids_cuda_patch_toolkit (#260) @robertmaynard
- Update libcudacxx to 1.8 (#253) @robertmaynard
- Update to CPM version 0.35.5 (#249) @robertmaynard
- Update to CPM v0.35.4 for URL downloads to match the download time (#236) @robertmaynard
- rapids-cmake dependency tracking now understands COMPONENTS (#234) @robertmaynard
- Update to thrust 1.17 (#231) @robertmaynard
- Update to CPM v0.35.3 to support symlink build directories (#230) @robertmaynard
- Update rapids-cmake to require cmake 3.23.1 (#227) @robertmaynard
- Improve GPU detection by doing less subsequent executions (#222) @robertmaynard
## 🛠️ Improvements
- Fix typo in `rapids-cmake-url` (#267) @trxcllnt
- Ensure `<pkg>_FOUND` is set in the generated `Find<pkg>.cmake` file (#266) @trxcllnt
- Set `CUDA_USE_STATIC_CUDA_RUNTIME` to control legacy `FindCUDA.cmake` behavior (#259) @trxcllnt
- Use the GitHub `.zip` URI instead of `GIT_REPOSITORY` and `GIT_BRANCH` (#257) @trxcllnt
- Update nvcomp to 2.3.3 (#221) @vyasr
# rapids-cmake 22.08.00 (17 Aug 2022)
## 🐛 Bug Fixes
- json exclude flag behaves as expected for libcudacxx/thrust/nvcomp ([#223](https://github.com/rapidsai/rapids-cmake/pull/223)) [@robertmaynard](https://github.com/robertmaynard)
- Remove nvcomp dependency on CUDA::cudart_static ([#218](https://github.com/rapidsai/rapids-cmake/pull/218)) [@robertmaynard](https://github.com/robertmaynard)
- Timestamps for URL downloads match the download time ([#215](https://github.com/rapidsai/rapids-cmake/pull/215)) [@robertmaynard](https://github.com/robertmaynard)
- Revert "Update nvcomp to 2.3.2 ([#209)" (#210](https://github.com/rapidsai/rapids-cmake/pull/209)" (#210)) [@vyasr](https://github.com/vyasr)
- rapids-cmake won't ever use an existing variable starting with RAPIDS_ ([#203](https://github.com/rapidsai/rapids-cmake/pull/203)) [@robertmaynard](https://github.com/robertmaynard)
## 📖 Documentation
- Docs now provide rapids_find_package examples ([#220](https://github.com/rapidsai/rapids-cmake/pull/220)) [@robertmaynard](https://github.com/robertmaynard)
- Minor typo fix in api.rst ([#207](https://github.com/rapidsai/rapids-cmake/pull/207)) [@vyasr](https://github.com/vyasr)
- rapids_cpm_<pkgs> document handling of unparsed args ([#206](https://github.com/rapidsai/rapids-cmake/pull/206)) [@robertmaynard](https://github.com/robertmaynard)
- Docs/remove doc warnings ([#205](https://github.com/rapidsai/rapids-cmake/pull/205)) [@robertmaynard](https://github.com/robertmaynard)
- Fix docs: default behavior is to use a shallow git clone. ([#204](https://github.com/rapidsai/rapids-cmake/pull/204)) [@bdice](https://github.com/bdice)
- Add rapids_cython to the html docs ([#197](https://github.com/rapidsai/rapids-cmake/pull/197)) [@robertmaynard](https://github.com/robertmaynard)
## 🚀 New Features
- More robust solution of CMake policy 135 ([#224](https://github.com/rapidsai/rapids-cmake/pull/224)) [@robertmaynard](https://github.com/robertmaynard)
- Update cuco git tag ([#213](https://github.com/rapidsai/rapids-cmake/pull/213)) [@PointKernel](https://github.com/PointKernel)
- Revert "Revert "Update nvcomp to 2.3.2 ([#209)" (#210)" (#211](https://github.com/rapidsai/rapids-cmake/pull/209)" (#210)" (#211)) [@vyasr](https://github.com/vyasr)
- Update nvcomp to 2.3.2 ([#209](https://github.com/rapidsai/rapids-cmake/pull/209)) [@robertmaynard](https://github.com/robertmaynard)
- rapids_cpm_rmm no longer install when no INSTALL_EXPORT_SET listed ([#202](https://github.com/rapidsai/rapids-cmake/pull/202)) [@robertmaynard](https://github.com/robertmaynard)
- Adds support for pulling cuCollections using rapids-cmake ([#201](https://github.com/rapidsai/rapids-cmake/pull/201)) [@vyasr](https://github.com/vyasr)
- Add support for a prefix in Cython module targets ([#198](https://github.com/rapidsai/rapids-cmake/pull/198)) [@vyasr](https://github.com/vyasr)
## 🛠️ Improvements
- `rapids_find_package()` called with explicit version and REQUIRED should fail ([#214](https://github.com/rapidsai/rapids-cmake/pull/214)) [@trxcllnt](https://github.com/trxcllnt)
# rapids-cmake 22.06.00 (7 June 2022)
## 🐛 Bug Fixes
- nvcomp install rules need to match the pre-built layout (#194) @robertmaynard
- Use target name variable. (#187) @bdice
- Remove unneeded message from rapids_export_package (#183) @robertmaynard
- rapids_cpm_thrust: Correctly find version 1.15.0 (#181) @robertmaynard
- rapids_cpm_thrust: Correctly find version 1.15.0 (#180) @robertmaynard
## 📖 Documentation
- Correct spelling mistake in cpm package docs (#188) @robertmaynard
## 🚀 New Features
- Add rapids_cpm_nvcomp with prebuilt binary support (#190) @robertmaynard
- Default Cython module RUNPATH to $ORIGIN and return the list of created targets (#189) @vyasr
- Add rapids-cython component for scikit-build based Python package builds (#184) @vyasr
- Add more exhaustive set of tests are version values of 0 (#178) @robertmaynard
- rapids_cpm_package_override now hooks into FetchContent (#164) @robertmaynard
## 🛠️ Improvements
- Update nvbench tag (#193) @PointKernel
# rapids-cmake 22.04.00 (6 Apr 2022)
## 🐛 Bug Fixes
- rapids_export now handles explicit version values of 0 correctly (#174) @robertmaynard
- rapids_export now internally uses better named variables (#172) @robertmaynard
- rapids_cpm_gtest will properly find GTest 1.10 packages (#168) @robertmaynard
- CMAKE_CUDA_ARCHITECTURES `ALL` will not insert 62 or 72 (#161) @robertmaynard
- Tracked package versions are now not required, but preferred. (#160) @robertmaynard
- cpm_thrust would fail when provided only an install export set (#155) @robertmaynard
- rapids_export generated config.cmake no longer leaks variables (#149) @robertmaynard
## 📖 Documentation
- Docs use intersphinx correctly to link to CMake command docs (#159) @robertmaynard
- Example explains when you should use `rapids_find_generate_module` (#153) @robertmaynard
- Add CMake intersphinx support (#147) @bdice
## 🚀 New Features
- Bump CPM 0.35 for per package CPM_DOWNLOAD controls (#158) @robertmaynard
- Track package versions to the generated `find_dependency` calls (#156) @robertmaynard
- Update to latest nvbench (#150) @robertmaynard
## 🛠️ Improvements
- Temporarily disable new `ops-bot` functionality (#170) @ajschmidt8
- Use exact gtest version (#165) @trxcllnt
- Add `.github/ops-bot.yaml` config file (#163) @ajschmidt8
# rapids-cmake 22.02.00 (2 Feb 2022)
## 🐛 Bug Fixes
- Ensure that nvbench doesn't require nvml when `CUDA::nvml` doesn't exist (#146) @robertmaynard
- rapids_cpm_libcudacxx handle CPM already finding libcudacxx before being called (#130) @robertmaynard
## 📖 Documentation
- Fix typos (#142) @ajschmidt8
- Fix typo in docs `<PackageName>_BINARY_DIR` instead of `<PackageName>_BINAR_DIR` (#140) @dagardner-nv
- Set the `always_download` value in versions.json to the common case (#135) @robertmaynard
- Update Changelog to capture all 21.08 and 21.10 changes (#134) @robertmaynard
- Correct minor formatting issues (#132) @robertmaynard
- Document how to control the git repo/tag that RAPIDS.cmake uses (#131) @robertmaynard
## 🚀 New Features
- rapids-cmake now supports an empty package entry in the override file (#145) @robertmaynard
- Update NVBench for 22.02 to be the latest version (#144) @robertmaynard
- Update rapids-cmake packages to libcudacxx 1.7 (#143) @robertmaynard
- Update rapids-cmake packages to Thrust 1.15 (#138) @robertmaynard
- add exclude_from_all flag to version.json (#137) @robertmaynard
- Add `PREFIX` option to write_version_file / write_git_revision_file (#118) @robertmaynard
## 🛠️ Improvements
- Remove rapids_cmake_install_lib_dir unstable side effect checks (#136) @robertmaynard
# rapids-cmake 21.12.00 (9 Dec 2021)
## 🐛 Bug Fixes
- rapids_cpm_libcudacxx install logic is safe for multiple inclusion (#124) @robertmaynard
- rapids_cpm_libcudacxx ensures CMAKE_INSTALL_INCLUDEDIR exists (#122) @robertmaynard
- rapids_cpm_find restores CPM variables when project was already added (#121) @robertmaynard
- rapids_cpm_thrust doesn't place temp file in a searched location (#120) @robertmaynard
- Require the exact version of Thrust in the versions.json file (#119) @trxcllnt
- CMake option second parameter is the help string, not the default value (#114) @robertmaynard
- Make sure we don't do a shallow clone on nvbench (#113) @robertmaynard
- Pin NVBench to a known working SHA1 (#112) @robertmaynard
- Build directory config.cmake now sets the correct targets to global (#110) @robertmaynard
- rapids_cpm_thrust installs to a location that won't be marked system (#98) @robertmaynard
- find_package now will find modules that CPM has downloaded. (#96) @robertmaynard
- rapids_cpm_thrust doesn't export namespaced thrust target (#93) @robertmaynard
- rapids_cpm_spdlog specifies the correct install variable (#91) @robertmaynard
- rapids_cpm_init: `CPM_SOURCE_CACHE` doesn't mean the CPM file exists (#87) @robertmaynard
## 📖 Documentation
- Better document that rapids_cpm_find supports arbitrary projects (#108) @robertmaynard
- Update the example to showcase rapids-cmake 21.12 (#107) @robertmaynard
- Properly generate rapids_cuda_init_runtime docs (#106) @robertmaynard
## 🚀 New Features
- Introduce rapids_cpm_libcudacxx (#111) @robertmaynard
- Record formatting rules for rapids_cpm_find DOWNLOAD_ONLY option (#94) @robertmaynard
- rapids_cmake_install_lib_dir now aware of GNUInstallDirs improvements in CMake 3.22 (#85) @robertmaynard
- rapids-cmake defaults to always download overridden packages (#83) @robertmaynard
## 🛠️ Improvements
- Prefer `CPM_<pkg>_SOURCE` dirs over `find_package()` in `rapids_cpm_find` (#92) @trxcllnt
# rapids-cmake 21.10.00 (7 Oct 2021)
## 🐛 Bug Fixes
- Remove unneeded inclusions of the old setup_cpm_cache.cmake (#82) @robertmaynard
- Make sure rapids-cmake doesn't produce CMake syntax warnings (#80) @robertmaynard
- rapids_export verify DOCUMENTATION and FINAL_CODE_BLOCK exist (#75) @robertmaynard
- Make sure rapids_cpm_spdlog specifies the correct spdlog global targets (#71) @robertmaynard
- rapids_cpm_thrust specifies the correct install variable (#70) @robertmaynard
- FIX Install sphinxcontrib-moderncmakedomain in docs script (#69) @dillon-cullinan
- rapids_export_cpm(BUILD) captures location of locally found packages (#65) @robertmaynard
- Introduce rapids_cmake_install_lib_dir (#61) @robertmaynard
- rapids_export(BUILD) only creates alias targets to existing targets (#55) @robertmaynard
- rapids_find_package propagates variables from find_package (#54) @robertmaynard
- rapids_cpm_find is more invariant as one would expect (#51) @robertmaynard
- rapids-cmake tests properly state what C++ std levels they require (#46) @robertmaynard
- rapids-cmake always generates GLOBAL_TARGETS names correctly (#36) @robertmaynard
## 📖 Documentation
- Update update-version.sh (#84) @raydouglass
- Add rapids_export_find_package_root to api doc page (#76) @robertmaynard
- README.md now references online docs (#72) @robertmaynard
- Copyright year range now matches when rapids-cmake existed (#67) @robertmaynard
- cmake-format: Now aware of `rapids_cmake_support_conda_env` flags (#62) @robertmaynard
- Bug/correct invalid generate module doc layout (#47) @robertmaynard
## 🚀 New Features
- rapids-cmake SHOULD_FAIL tests verify the CMake Error string (#79) @robertmaynard
- Introduce rapids_cmake_write_git_revision_file (#77) @robertmaynard
- Allow projects to override version.json information (#74) @robertmaynard
- rapids_export_package(BUILD) captures location of locally found packages (#68) @robertmaynard
- Introduce rapids_export_find_package_root command (#64) @robertmaynard
- Introduce rapids_cpm_<preset> (#52) @robertmaynard
- Tests now can be SERIAL and use FetchContent to get rapids-cmake (#48) @robertmaynard
- rapids_export version support expanded to handle more use-cases (#37) @robertmaynard
## 🛠️ Improvements
- cpm tests now download less components and can be run in parallel. (#81) @robertmaynard
- Ensure that all rapids-cmake files have include guards (#63) @robertmaynard
- Introduce RAPIDS.cmake a better way to fetch rapids-cmake (#45) @robertmaynard
- ENH Replace gpuci_conda_retry with gpuci_mamba_retry (#44) @dillon-cullinan
# rapids-cmake 21.08.00 (4 Aug 2021)
## 🚀 New Features
- Introduce `rapids_cmake_write_version_file` to generate a C++ version header ([#23](https://github.com/rapidsai/rapids-cmake/pull/23)) [@robertmaynard](https://github.com/robertmaynard)
- Introduce `cmake-format-rapids-cmake` to allow `cmake-format` to understand rapids-cmake custom functions ([#29](https://github.com/rapidsai/rapids-cmake/pull/29)) [@robertmaynard](https://github.com/robertmaynard)
## 🛠️ Improvements
## 🐛 Bug Fixes
- ci/gpu/build.sh uses git tags to properly compute conda env (#43) @robertmaynard
- Make sure that rapids-cmake-dir cache variable is hidden (#40) @robertmaynard
- Correct regression specify rapids-cmake-dir as a cache variable (#39) @robertmaynard
- rapids-cmake add entries to CMAKE_MODULE_PATH on first config (#34) @robertmaynard
- Add tests that verify all paths in each rapids-<component>.cmake file ([#24](https://github.com/rapidsai/rapids-cmake/pull/24)) [@robertmaynard](https://github.com/robertmaynard)
- Correct issue where `rapids_export(DOCUMENTATION` content was being ignored ([#30](https://github.com/rapidsai/rapids-cmake/pull/30)) [@robertmaynard](https://github.com/robertmaynard)
- rapids-cmake can now be correctly used by multiple adjacent directories ([#33](https://github.com/rapidsai/rapids-cmake/pull/33)) [@robertmaynard](https://github.com/robertmaynard)
# rapids-cmake 21.06.00 (Date TBD)
Please see https://github.com/rapidsai/rapids-cmake/releases/tag/v21.06.0a for the latest changes to this development branch.
## 🚀 New Features
- Introduce `rapids_cmake_parse_version` for better version extraction ([#20](https://github.com/rapidsai/rapids-cmake/pull/20)) [@robertmaynard](https://github.com/robertmaynard)
## 🛠️ Improvements
- Verify that rapids-cmake always preserves CPM arguments ([#18](https://github.com/rapidsai/rapids-cmake/pull/18)) [@robertmaynard](https://github.com/robertmaynard)
- Add Sphinx based documentation for the project ([#14](https://github.com/rapidsai/rapids-cmake/pull/14)) [@robertmaynard](https://github.com/robertmaynard)
- `rapids_export` places the build export files in a location CPM can find. ([#3](https://github.com/rapidsai/rapids-cmake/pull/3)) [@robertmaynard](https://github.com/robertmaynard)
## 🐛 Bug Fixes
- Make sure we properly quote all CPM args ([#17](https://github.com/rapidsai/rapids-cmake/pull/17)) [@robertmaynard](https://github.com/robertmaynard)
- `rapids_export` correctly handles version strings with leading zeroes ([#12](https://github.com/rapidsai/rapids-cmake/pull/12)) [@robertmaynard](https://github.com/robertmaynard)
- `rapids_export_write_language` properly executes each time CMake is run ([#10](https://github.com/rapidsai/rapids-cmake/pull/10)) [@robertmaynard](https://github.com/robertmaynard)
- `rapids_export` properly sets version variables ([#9](https://github.com/rapidsai/rapids-cmake/pull/9)) [@robertmaynard](https://github.com/robertmaynard)
- `rapids_export` now obeys CMake config file naming convention ([#8](https://github.com/rapidsai/rapids-cmake/pull/8)) [@robertmaynard](https://github.com/robertmaynard)
- Refactor layout to enable adding CI and Documentation ([#5](https://github.com/rapidsai/rapids-cmake/pull/5)) [@robertmaynard](https://github.com/robertmaynard)
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/init.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
#
# This is NOT an entry point for other projects using rapids-cmake
#
# Nothing but rapids-cmake/CMakeLists.txt should include this file
#
if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
  # Be defensive against other projects overwriting CMAKE_MODULE_PATH on us!
set(rapids-cmake-dir "${rapids-cmake-dir}" PARENT_SCOPE)
if(NOT "${rapids-cmake-dir}" IN_LIST CMAKE_MODULE_PATH)
list(APPEND CMAKE_MODULE_PATH "${rapids-cmake-dir}")
endif()
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" PARENT_SCOPE)
# Don't install this hook if another rapids project has already done so
get_directory_property(parent_dir PARENT_DIRECTORY)
cmake_language(DEFER DIRECTORY "${parent_dir}" GET_CALL_IDS rapids_existing_calls)
if(NOT rapids_init_hook IN_LIST rapids_existing_calls)
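    # Deferring a re-include of this file to the end of the parent directory
    # re-appends our entry and propagates CMAKE_MODULE_PATH up the directory tree.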
cmake_language(DEFER DIRECTORY "${parent_dir}"
ID rapids_init_hook
CALL include "${rapids-cmake-dir}/../init.cmake")
endif()
endif()
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/dependencies.yaml | # Dependency list for https://github.com/rapidsai/dependency-file-generator
files:
test:
output: none
includes:
- build
- cudatoolkit
- docs
- py_version
- test
checks:
output: none
includes:
- build
- style_checks
- py_version
docs:
output: none
includes:
- cudatoolkit
- docs
channels:
- rapidsai
- conda-forge
dependencies:
build:
common:
- output_types: [conda, requirements]
packages:
- cmake>=3.23.1,!=3.25.0
- ninja
- output_types: conda
packages:
- c-compiler
- cxx-compiler
- make
specific:
- output_types: conda
matrices:
- matrix:
arch: x86_64
cuda: "11.2"
packages:
- nvcc_linux-64=11.2
- matrix:
arch: aarch64
cuda: "11.2"
packages:
- nvcc_linux-aarch64=11.2
- matrix:
arch: x86_64
cuda: "11.4"
packages:
- nvcc_linux-64=11.4
- matrix:
arch: aarch64
cuda: "11.4"
packages:
- nvcc_linux-aarch64=11.4
- matrix:
arch: x86_64
cuda: "11.5"
packages:
- nvcc_linux-64=11.5
- matrix:
arch: aarch64
cuda: "11.5"
packages:
- nvcc_linux-aarch64=11.5
- matrix:
arch: x86_64
cuda: "11.8"
packages:
- nvcc_linux-64=11.8
- matrix:
arch: aarch64
cuda: "11.8"
packages:
- nvcc_linux-aarch64=11.8
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-nvcc
cudatoolkit:
specific:
- output_types: conda
matrices:
- matrix:
cuda: "11.2"
packages:
- cuda-version=11.2
- cudatoolkit
- gcc<11.0.0
- sysroot_linux-64==2.17
- matrix:
cuda: "11.4"
packages:
- cuda-version=11.4
- cudatoolkit
- gcc<11.0.0
- sysroot_linux-64==2.17
- matrix:
cuda: "11.5"
packages:
- cuda-version=11.5
- cudatoolkit
- gcc<11.0.0
- sysroot_linux-64==2.17
- matrix:
cuda: "11.6"
packages:
- cuda-version=11.6
- cudatoolkit
- gcc<12.0.0
- sysroot_linux-64==2.17
- matrix:
cuda: "11.8"
packages:
- cuda-version=11.8
- cudatoolkit
- gcc<12.0.0
- sysroot_linux-64==2.17
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-cupti-dev
- gcc<13.0.0
- sysroot_linux-64==2.17
docs:
common:
- output_types: [conda]
packages:
- pip
- pip:
- sphinxcontrib-moderncmakedomain
- sphinx
- sphinx-copybutton
- sphinx_rtd_theme
test:
common:
- output_types: [conda, requirements]
packages:
- cython>=0.29,<0.30
- scikit-build>=0.13.1
- libpng
- zlib
- output_types: [conda]
packages:
- fmt==9.1.0
py_version:
specific:
- output_types: conda
matrices:
- matrix:
py: "3.9"
packages:
- python=3.9
- matrix:
py: "3.10"
packages:
- python=3.10
- matrix:
packages:
- python>=3.9,<3.11
style_checks:
common:
- output_types: [conda, requirements]
packages:
- pre-commit
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/CONTRIBUTING.md | # Contributing to rapids-cmake
If you are interested in contributing to rapids-cmake, your contributions will fall
into three categories:
1. You want to report a bug, feature request, or documentation issue
- File an [issue](https://github.com/rapidsai/rapids-cmake/issues/new/choose)
describing what you encountered or what you want to see changed.
- The RAPIDS team will evaluate the issues and triage them, scheduling
them for a release. If you believe the issue needs priority attention
comment on the issue to notify the team.
2. You want to propose a new Feature and implement it
- Post about your intended feature, and we shall discuss the design and
implementation.
- Once we agree that the plan looks good, go ahead and implement it, using
the [code contributions](#code-contributions) guide below.
3. You want to implement a feature or bug-fix for an outstanding issue
- Follow the [code contributions](#code-contributions) guide below.
- If you need more context on a particular issue, please ask and we shall
provide.
## Code contributions
While RAPIDS core provides commonly used scripts, we know that they aren't universal and might need to be composed in different ways.
This means that the code we are developing should be designed for composability, and all side effects
or CMake behavior changes should be explicitly opt-in.
So when writing new rapids-cmake features, make sure to think about how users might want to opt in, and
provide the necessary function decomposition. For example, let's look at an easy wrapper around
creating libraries and setting properties.
```
[=[ BAD ]=]
function(rapids_add_library target)
  add_library(${target} ${ARGN})
  set_target_properties(${target}
    PROPERTIES
      CUDA_STANDARD 17
      CUDA_STANDARD_REQUIRED ON
  )
endfunction()
rapids_add_library(example SHARED ...)
[=[ GOOD ]=]
function(rapids_cmake_setup_target target)
  set_target_properties(${target}
    PROPERTIES
      CUDA_STANDARD 17
      CUDA_STANDARD_REQUIRED ON
  )
endfunction()
function(rapids_add_library target)
  add_library(${target} ${ARGN})
  rapids_cmake_setup_target( ${target} )
endfunction()
rapids_add_library(example SHARED ...)
```
Here we can see that breaking out `rapids_cmake_setup_target` is important, as it allows users
that don't or can't use `rapids_add_library` to still opt in to other features.
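For instance, a project that can't use `rapids_add_library` can still opt in to just the property setup (the target and source names below are hypothetical):
```
add_library(my_lib SHARED source.cpp)
rapids_cmake_setup_target( my_lib )
```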
Please ensure that when you are creating new features you follow these guidelines (a sketch applying them follows this list):
- Each function should follow the `rapids_<component>_<file_name>` naming pattern
- Each function should go into a separate `.cmake` file in the appropriate directory
- Each user facing `.cmake` file should have include guards (`include_guard(GLOBAL)`)
- Each user facing `.cmake` file should be documented following the rst structure
- Each user facing function should be added to the `cmake-format.json` document
- Run `cmake-genparsers -f json` on the `.cmake` file as a starting point
- Each function first line should be `list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.<component>.<function>")`
- A file should not modify any state simply by being included. State modification should
only occur inside functions unless absolutely necessary due to restrictions of the CMake
language.
- Any files that do need to break this rule can't be part of `rapids-<component>.cmake`.
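Putting these guidelines together, a new user-facing file might look like the following sketch (the component, file name, and `rapids_cmake_example` function are hypothetical):
```
# rapids-cmake/cmake/example.cmake
include_guard(GLOBAL)
#[=[.rst:
rapids_cmake_example
--------------------
Document the function here following the rst structure.
#]=]
function(rapids_cmake_example target)
  list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cmake.example")
  # All state modification happens inside the function, never at include time
  set_target_properties(${target} PROPERTIES CXX_STANDARD 17)
endfunction()
```
A matching `rapids_cmake_example` entry in `cmake-format.json` (bootstrapped with `cmake-genparsers -f json`) completes the change.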
### Your first issue
1. Read the project's [README.md](https://github.com/rapidsai/rapids-cmake/blob/main/README.md)
to learn how to set up the development environment
2. Find an issue to work on. The best way is to look for the [good first issue](https://github.com/rapidsai/rapids-cmake/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
or [help wanted](https://github.com/rapidsai/rapids-cmake/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) labels
3. Comment on the issue saying you are going to work on it
4. Code! Make sure to update unit tests!
5. When done, [create your pull request](https://github.com/rapidsai/rapids-cmake/compare)
6. Verify that CI passes all [status checks](https://help.github.com/articles/about-status-checks/). Fix if needed
7. Wait for other developers to review your code and update code as needed
8. Once reviewed and approved, a RAPIDS developer will merge your pull request
Remember, if you are unsure about anything, don't hesitate to comment on issues
and ask for clarifications!
### Seasoned developers
Once you have gotten your feet wet and are more comfortable with the code, you
can look at the prioritized issues of our next release in our [project boards](https://github.com/rapidsai/rapids-cmake/projects).
> **Pro Tip:** Always look at the release board with the highest number for
issues to work on. This is where RAPIDS developers also focus their efforts.
Look at the unassigned issues, and find an issue you are comfortable with
contributing to. Start with _Step 3_ from above, commenting on the issue to let
others know you are working on it. If you have any questions related to the
implementation of the issue, ask them in the issue instead of the PR.
## Attribution
Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2021 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos | rapidsai_public_repos/rapids-cmake/.clang-format | ---
# Refer to the following link for the explanation of each params:
# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLambdasOnASingleLine: true
AllowShortLoopsOnASingleLine: false
# This is deprecated
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# disabling the below splits, else, they'll just add to the vertical length of source files!
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakAfterJavaFieldAnnotations: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: WebKit
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakInheritanceList: BeforeColon
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
# Enabling comment reflow causes doxygen comments to be messed up in their formats!
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++17
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
# Be consistent with indent-width, even for people who use tab for indentation!
TabWidth: 2
UseTab: Never
| 0 |
rapidsai_public_repos/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/rapids-export.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
include(${CMAKE_CURRENT_LIST_DIR}/export/package.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/export/cpm.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/export/export.cmake)
| 0 |
rapidsai_public_repos/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/rapids-cuda.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
include(${CMAKE_CURRENT_LIST_DIR}/cuda/init_architectures.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cuda/init_runtime.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cuda/set_architectures.cmake)
| 0 |
rapidsai_public_repos/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/rapids-find.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
include(${CMAKE_CURRENT_LIST_DIR}/find/generate_module.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/find/package.cmake)
| 0 |
rapidsai_public_repos/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/rapids-cpm.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
include(${CMAKE_CURRENT_LIST_DIR}/cpm/init.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cpm/find.cmake)
| 0 |
rapidsai_public_repos/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/rapids-test.cmake | #=============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
include(${CMAKE_CURRENT_LIST_DIR}/test/init.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/test/add.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/test/generate_resource_spec.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/test/gpu_requirements.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/test/install_relocatable.cmake)
| 0 |
rapidsai_public_repos/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/rapids-cython.cmake | #=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
include(${CMAKE_CURRENT_LIST_DIR}/cython/init.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cython/create_modules.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cython/add_rpath_entries.cmake)
| 0 |
rapidsai_public_repos/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/rapids-version.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
# can't have an include guard on this file
# that breaks its usage by cpm/detail/package_details
if(NOT DEFINED rapids-cmake-version)
set(rapids-cmake-version 24.02)
endif()
| 0 |
rapidsai_public_repos/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/rapids-cmake.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/build_type.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/install_lib_dir.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/parse_version.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/support_conda_env.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/write_git_revision_file.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/write_version_file.cmake)
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/set_runtime.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cuda_set_runtime
-------------------------------
.. versionadded:: v23.08.00
Establish what CUDA runtime library should be used by a single target
.. code-block:: cmake
rapids_cuda_set_runtime( target USE_STATIC (TRUE|FALSE) )
Establishes what CUDA runtime will be used for a target, via
the :cmake:prop_tgt:`CUDA_RUNTIME_LIBRARY <cmake:prop_tgt:CUDA_RUNTIME_LIBRARY>`
and by linking to `CUDA::cudart` or `CUDA::cudart_static` if the :cmake:module:`find_package(CUDAToolkit)
<cmake:module:FindCUDAToolkit>` has been called.
The linking to the `CUDA::cudart` or `CUDA::cudart_static` will have the following
usage behavior:
- For `INTERFACE` targets the linking will be `INTERFACE`
- For all other targets the linking will be `PRIVATE`
.. note::
If using the deprecated `FindCUDA.cmake` you must use the
:cmake:command:`rapids_cuda_init_runtime` method to properly establish the default
mode.
When `USE_STATIC TRUE` is provided the target will link to a
statically-linked CUDA runtime library.
When `USE_STATIC FALSE` is provided the target will link to a
shared-linked CUDA runtime library.
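A minimal usage sketch; the target and source names are illustrative:
.. code-block:: cmake
find_package(CUDAToolkit REQUIRED)
add_library(example_lib SHARED example.cu)
# Link example_lib against the shared CUDA runtime (CUDA::cudart)
rapids_cuda_set_runtime(example_lib USE_STATIC FALSE)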
#]=======================================================================]
function(rapids_cuda_set_runtime target use_static value)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cuda.set_runtime")
get_target_property(type ${target} TYPE)
if(type STREQUAL "INTERFACE_LIBRARY")
set(mode INTERFACE)
else()
set(mode PRIVATE)
endif()
if(${value})
set_target_properties(${target} PROPERTIES CUDA_RUNTIME_LIBRARY STATIC)
target_link_libraries(${target} ${mode} $<TARGET_NAME_IF_EXISTS:CUDA::cudart_static>)
else()
set_target_properties(${target} PROPERTIES CUDA_RUNTIME_LIBRARY SHARED)
target_link_libraries(${target} ${mode} $<TARGET_NAME_IF_EXISTS:CUDA::cudart>)
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/init_architectures.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cuda_init_architectures
-------------------------------
.. versionadded:: v21.06.00
Extends :cmake:variable:`CMAKE_CUDA_ARCHITECTURES <cmake:variable:CMAKE_CUDA_ARCHITECTURES>`
to include support for `RAPIDS` and `NATIVE` to make CUDA architecture compilation easier.
.. code-block:: cmake
rapids_cuda_init_architectures(<project_name>)
Used before enabling the CUDA language via :cmake:command:`project() <cmake:command:project>` to establish the
CUDA architectures to be compiled for. Parses the :cmake:envvar:`ENV{CUDAARCHS} <cmake:envvar:CUDAARCHS>`, and
:cmake:variable:`CMAKE_CUDA_ARCHITECTURES <cmake:variable:CMAKE_CUDA_ARCHITECTURES>` for special values
`RAPIDS`, and `NATIVE`.
.. note::
Required to be called before the first :cmake:command:`project() <cmake:command:project>` call.
Will automatically call :cmake:command:`rapids_cuda_set_architectures` immediately
after :cmake:command:`project() <cmake:command:project>` with the same project name establishing
the correct values for :cmake:variable:`CMAKE_CUDA_ARCHITECTURES <cmake:variable:CMAKE_CUDA_ARCHITECTURES>`.
``project_name``
Name of the project in the subsequent :cmake:command:`project() <cmake:command:project>` call.
.. include:: supported_cuda_architectures_values.txt
Example on how to properly use :cmake:command:`rapids_cuda_init_architectures`:
.. code-block:: cmake
cmake_minimum_required(...)
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/EXAMPLE_RAPIDS.cmake)
file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-<VERSION_MAJOR>.<VERSION_MINOR>/RAPIDS.cmake
${CMAKE_CURRENT_BINARY_DIR}/EXAMPLE_RAPIDS.cmake)
endif()
include(${CMAKE_CURRENT_BINARY_DIR}/EXAMPLE_RAPIDS.cmake)
include(rapids-cuda)
rapids_cuda_init_architectures(ExampleProject)
project(ExampleProject ...)
#]=======================================================================]
# cmake-lint: disable=W0105
function(rapids_cuda_init_architectures project_name)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cuda.init_architectures")
include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/detail/architectures_policy.cmake)
# If `CMAKE_CUDA_ARCHITECTURES` is not defined, build for all supported architectures. If
# `CMAKE_CUDA_ARCHITECTURES` is set to an empty string (""), build for only the current
# architecture. If `CMAKE_CUDA_ARCHITECTURES` is specified by the user, use user setting.
if(DEFINED ENV{CUDAARCHS} AND ("$ENV{CUDAARCHS}" STREQUAL "RAPIDS" OR "$ENV{CUDAARCHS}" STREQUAL
"ALL"))
set(cuda_arch_mode "$ENV{CUDAARCHS}")
rapids_cuda_architectures_policy(FROM_INIT cuda_arch_mode)
elseif(DEFINED ENV{CUDAARCHS} AND "$ENV{CUDAARCHS}" STREQUAL "NATIVE")
set(cuda_arch_mode "NATIVE")
elseif(CMAKE_CUDA_ARCHITECTURES STREQUAL "RAPIDS" OR CMAKE_CUDA_ARCHITECTURES STREQUAL "ALL")
set(cuda_arch_mode "${CMAKE_CUDA_ARCHITECTURES}")
rapids_cuda_architectures_policy(FROM_INIT cuda_arch_mode)
elseif(CMAKE_CUDA_ARCHITECTURES STREQUAL "")
set(cuda_arch_mode "NATIVE")
set(deprecated_cuda_arch_mode "EMPTY_STR")
rapids_cuda_architectures_policy(FROM_INIT deprecated_cuda_arch_mode)
elseif(CMAKE_CUDA_ARCHITECTURES STREQUAL "NATIVE")
set(cuda_arch_mode "NATIVE")
elseif(NOT (DEFINED ENV{CUDAARCHS} OR DEFINED CMAKE_CUDA_ARCHITECTURES))
set(cuda_arch_mode "RAPIDS")
endif()
# This needs to be run before enabling the CUDA language since RAPIDS supports magic values like
# `RAPIDS`, `ALL`, and `NATIVE` which if propagated cause CMake to fail to determine the CUDA
# compiler
if(cuda_arch_mode STREQUAL "RAPIDS")
set(CMAKE_CUDA_ARCHITECTURES OFF PARENT_SCOPE)
set(load_file "${CMAKE_CURRENT_FUNCTION_LIST_DIR}/detail/invoke_set_all_architectures.cmake")
elseif(cuda_arch_mode STREQUAL "NATIVE")
set(CMAKE_CUDA_ARCHITECTURES OFF PARENT_SCOPE)
set(load_file "${CMAKE_CURRENT_FUNCTION_LIST_DIR}/detail/invoke_set_native_architectures.cmake")
endif()
if(load_file)
include("${CMAKE_CURRENT_FUNCTION_LIST_DIR}/set_architectures.cmake")
# Setup to call to set CMAKE_CUDA_ARCHITECTURES values to occur right after the project call
# https://cmake.org/cmake/help/latest/command/project.html#code-injection
#
# If an existing file was specified for loading post `project` we will chain include them
if(DEFINED CMAKE_PROJECT_${project_name}_INCLUDE)
set(_RAPIDS_PREVIOUS_CMAKE_PROJECT_INCLUDE "${CMAKE_PROJECT_${project_name}_INCLUDE}"
PARENT_SCOPE)
endif()
set(CMAKE_PROJECT_${project_name}_INCLUDE "${load_file}" PARENT_SCOPE)
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/set_architectures.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cuda_set_architectures
-------------------------------
.. versionadded:: v21.06.00
Sets up :cmake:variable:`CMAKE_CUDA_ARCHITECTURES` based on the requested mode
.. code-block:: cmake
rapids_cuda_set_architectures( (NATIVE|RAPIDS) )
Establishes which CUDA architectures will be compiled for, overriding
any existing :cmake:variable:`CMAKE_CUDA_ARCHITECTURES` value.
This function should rarely be used, as :cmake:command:`rapids_cuda_init_architectures`
allows for the expected workflow of using :cmake:variable:`CMAKE_CUDA_ARCHITECTURES`
when configuring a project. If for some reason your project can't use
:cmake:command:`rapids_cuda_init_architectures` then you can use :cmake:command:`rapids_cuda_set_architectures`
directly.
.. note::
This is automatically called by :cmake:command:`rapids_cuda_init_architectures`
.. include:: supported_cuda_architectures_values.txt
Result Variables
^^^^^^^^^^^^^^^^
``CMAKE_CUDA_ARCHITECTURES`` will exist and be set to the list of architectures
that should be compiled for. Will overwrite any existing values.
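A sketch of direct usage; most projects should prefer
:cmake:command:`rapids_cuda_init_architectures`:
.. code-block:: cmake
enable_language(CUDA)
# Compile for every RAPIDS supported architecture of this compiler
rapids_cuda_set_architectures(RAPIDS)
message(STATUS "Compiling for: ${CMAKE_CUDA_ARCHITECTURES}")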
#]=======================================================================]
function(rapids_cuda_set_architectures mode)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cuda.set_architectures")
set(supported_archs "70" "75" "80" "86" "90")
if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA" AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 11.1.0)
list(REMOVE_ITEM supported_archs "86")
endif()
if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA" AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 11.8.0)
list(REMOVE_ITEM supported_archs "90")
endif()
include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/detail/architectures_policy.cmake)
rapids_cuda_architectures_policy(FROM_SET mode)
if(${mode} STREQUAL "RAPIDS")
# CMake architecture list entry of "80" means to build compute and sm. What we want is for the
# newest arch only to build that way while the rest built only for sm.
list(POP_BACK supported_archs latest_arch)
list(TRANSFORM supported_archs APPEND "-real")
list(APPEND supported_archs ${latest_arch})
set(CMAKE_CUDA_ARCHITECTURES ${supported_archs} PARENT_SCOPE)
elseif(${mode} STREQUAL "NATIVE")
include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/detail/detect_architectures.cmake)
rapids_cuda_detect_architectures(supported_archs CMAKE_CUDA_ARCHITECTURES)
list(TRANSFORM CMAKE_CUDA_ARCHITECTURES APPEND "-real")
set(CMAKE_CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES} PARENT_SCOPE)
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/init_runtime.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cuda_init_runtime
-------------------------------
.. versionadded:: v21.06.00
Establish what CUDA runtime library should be propagated
.. code-block:: cmake
rapids_cuda_init_runtime( USE_STATIC (TRUE|FALSE) )
Establishes what CUDA runtime will be used, if not already explicitly
specified, via the :cmake:variable:`CMAKE_CUDA_RUNTIME_LIBRARY <cmake:variable:CMAKE_CUDA_RUNTIME_LIBRARY>`
variable. We also set :cmake:variable:`CUDA_USE_STATIC_CUDA_RUNTIME <cmake:module:FindCUDA>` to control
targets using the legacy `FindCUDA.cmake`.
When `USE_STATIC TRUE` is provided all targets will link to a
statically-linked CUDA runtime library.
When `USE_STATIC FALSE` is provided all targets will link to a
shared-linked CUDA runtime library.
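A short sketch; the project name is illustrative:
.. code-block:: cmake
include(rapids-cuda)
project(ExampleProject LANGUAGES CUDA)
# Targets created after this call default to the static CUDA runtime
rapids_cuda_init_runtime(USE_STATIC TRUE)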
#]=======================================================================]
function(rapids_cuda_init_runtime use_static value)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cuda.init_runtime")
if(NOT DEFINED CMAKE_CUDA_RUNTIME_LIBRARY)
if(${value})
set(CMAKE_CUDA_RUNTIME_LIBRARY STATIC PARENT_SCOPE)
else()
set(CMAKE_CUDA_RUNTIME_LIBRARY SHARED PARENT_SCOPE)
endif()
endif()
# Control legacy FindCUDA.cmake behavior too
if(NOT DEFINED CUDA_USE_STATIC_CUDA_RUNTIME)
if(${value})
set(CUDA_USE_STATIC_CUDA_RUNTIME ON PARENT_SCOPE)
else()
set(CUDA_USE_STATIC_CUDA_RUNTIME OFF PARENT_SCOPE)
endif()
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/patch_toolkit.cmake | #=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cuda_patch_toolkit
---------------------------------
.. versionadded:: v22.10.00
Corrects missing dependencies in the CUDA toolkit
.. code-block:: cmake
rapids_cuda_patch_toolkit( )
For CMake versions 3.23.1 through 3.23.3 and 3.24.1, the dependencies
of the cublas and cusolver targets are incorrect. This module must be called
from the same CMakeLists.txt as the first `find_package(CUDAToolkit)` to
patch the targets.
.. note::
:cmake:command:`rapids_cpm_find` will automatically call this module
when asked to find the CUDAToolkit.
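A minimal sketch of manual usage:
.. code-block:: cmake
find_package(CUDAToolkit REQUIRED)
# Re-establish the missing cublasLt / cusolver_lapack link dependencies
rapids_cuda_patch_toolkit()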
#]=======================================================================]
function(rapids_cuda_patch_toolkit)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cuda.patch_toolkit")
get_directory_property(itargets IMPORTED_TARGETS)
if(CMAKE_VERSION VERSION_LESS 3.24.2)
if(CUDA::cublas IN_LIST itargets)
target_link_libraries(CUDA::cublas INTERFACE CUDA::cublasLt)
endif()
if(CUDA::cublas_static IN_LIST itargets)
target_link_libraries(CUDA::cublas_static INTERFACE CUDA::cublasLt_static)
endif()
if(CUDA::cusolver_static IN_LIST itargets)
target_link_libraries(CUDA::cusolver_static INTERFACE CUDA::cusolver_lapack_static)
endif()
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/detail/invoke_set_all_architectures.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
#
# RAPIDS detected that the user requested a file to be
# included after `project()`, so chain the include calls.
if(DEFINED _RAPIDS_PREVIOUS_CMAKE_PROJECT_INCLUDE)
include("${_RAPIDS_PREVIOUS_CMAKE_PROJECT_INCLUDE}")
endif()
#
# Used by rapids_cuda_init_architectures to allow the `project()` call to invoke the
# rapids_cuda_set_architectures function after compiler detection
#
rapids_cuda_set_architectures(RAPIDS)
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/detail/architectures_policy.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cuda_architectures_policy
--------------------------------
.. versionadded:: v23.02.00
Maps deprecated mode values to new supported values and outputs rapids-cmake
deprecation warnings.
.. versionchanged:: v23.06.00
Now errors on deprecated mode values and outputs guidance on how to upgrade
.. code-block:: cmake
rapids_cuda_architectures_policy( (FROM_INIT|FROM_SET) mode_variable )
#]=======================================================================]
function(rapids_cuda_architectures_policy called_from mode_variable)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cuda.architectures_policy")
include("${rapids-cmake-dir}/cmake/detail/policy.cmake")
set(value ${${mode_variable}})
set(new_value ${value})
if(value STREQUAL "ALL")
set(new_value "RAPIDS")
if(called_from STREQUAL "FROM_INIT")
rapids_cmake_policy(DEPRECATED_IN 23.02
REMOVED_IN 23.06
MESSAGE [=[Usage of `ALL` as value for `CMAKE_CUDA_ARCHITECTURES` or the env variable `CUDAARCHS` has been deprecated, use `RAPIDS` instead.]=]
)
elseif(called_from STREQUAL "FROM_SET")
rapids_cmake_policy(DEPRECATED_IN 23.02
REMOVED_IN 23.06
MESSAGE [=[Usage of `ALL` as value passed to `rapids_cuda_set_architectures` has been deprecated, use `RAPIDS` instead.]=]
)
endif()
endif()
if(value STREQUAL "EMPTY_STR")
set(new_value "NATIVE")
rapids_cmake_policy(DEPRECATED_IN 23.02
REMOVED_IN 23.06
MESSAGE [=[Usage of `""` as value for `CMAKE_CUDA_ARCHITECTURES` has been deprecated, use `NATIVE` instead.]=]
)
endif()
set(${mode_variable} ${new_value} PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/detail/detect_architectures.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
# Function uses the CUDA runtime API to query the compute capability of the device, so if a user
# doesn't pass any architecture options to CMake we only build the current architecture
function(rapids_cuda_detect_architectures possible_archs_var gpu_archs)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cuda.detect_architectures")
# Unset this first in case it's set to <empty_string>, which can happen inside rapids
set(CMAKE_CUDA_ARCHITECTURES OFF)
set(__gpu_archs ${${possible_archs_var}})
set(eval_file ${PROJECT_BINARY_DIR}/eval_gpu_archs.cu)
set(eval_exe ${PROJECT_BINARY_DIR}/eval_gpu_archs)
set(error_file ${PROJECT_BINARY_DIR}/eval_gpu_archs.stderr.log)
if(NOT DEFINED CMAKE_CUDA_COMPILER)
message(FATAL_ERROR "No CUDA compiler specified, unable to determine machine's GPUs.")
endif()
if(NOT EXISTS "${eval_exe}")
file(WRITE ${eval_file}
"
#include <cstdio>
#include <set>
#include <string>
using namespace std;
int main(int argc, char** argv) {
set<string> archs;
int nDevices;
if((cudaGetDeviceCount(&nDevices) == cudaSuccess) && (nDevices > 0)) {
for(int dev=0;dev<nDevices;++dev) {
char buff[32];
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, dev) != cudaSuccess) continue;
sprintf(buff, \"%d%d\", prop.major, prop.minor);
archs.insert(buff);
}
}
if(archs.empty()) {
printf(\"${__gpu_archs}\");
} else {
bool first = true;
for(const auto& arch : archs) {
printf(first? \"%s\" : \";%s\", arch.c_str());
first = false;
}
}
printf(\"\\n\");
return 0;
}
")
execute_process(COMMAND ${CMAKE_CUDA_COMPILER} -std=c++11 -o "${eval_exe}" "${eval_file}"
ERROR_FILE "${error_file}")
endif()
if(EXISTS "${eval_exe}")
execute_process(COMMAND "${eval_exe}" OUTPUT_VARIABLE __gpu_archs
OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_FILE "${error_file}")
message(STATUS "Auto detection of gpu-archs: ${__gpu_archs}")
else()
message(STATUS "Failed auto detection of gpu-archs. Falling back to using ${__gpu_archs}.")
endif()
set(${gpu_archs} ${__gpu_archs} PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda | rapidsai_public_repos/rapids-cmake/rapids-cmake/cuda/detail/invoke_set_native_architectures.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
#
# RAPIDS detected that the user requested a file to be
# included after `project()`, so chain the include calls.
if(DEFINED _RAPIDS_PREVIOUS_CMAKE_PROJECT_INCLUDE)
include("${_RAPIDS_PREVIOUS_CMAKE_PROJECT_INCLUDE}")
endif()
#
# Used by rapids_cuda_init_architectures to allow the `project()` call to invoke the
# rapids_cuda_set_architectures function after compiler detection
#
rapids_cuda_set_architectures(NATIVE)
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cython/add_rpath_entries.cmake | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cython_add_rpath_entries
-------------------------------
.. versionadded:: v22.12.00
Set the RPATH entries for all targets associated with a provided associated target.
.. code-block:: cmake
rapids_cython_add_rpath_entries(
TARGET <associated_target>
PATHS <path1> <path2> ...
[ROOT_DIRECTORY <root-dir>]
)
This function will affect all targets created up to the point of this call. It
will have no effect on targets created afterwards.
``TARGET``
The associated target for which we are setting RPATH entries. Any target
created using :cmake:command:`rapids_cython_create_modules` with the argument
`ASSOCIATED_TARGETS associated_target` will have its RPATH entries updated.
``PATHS``
The paths to add to the RPATH. Paths may either be absolute or relative to
the ROOT_DIRECTORY. The paths are always converted to be relative to the
current directory, i.e. relative to $ORIGIN in the RPATH.
``ROOT_DIRECTORY``
The ROOT_DIRECTORY for the provided paths. Defaults to ${PROJECT_SOURCE_DIR}.
Has no effect on absolute paths. If the ROOT_DIRECTORY is a relative path, it
is assumed to be relative to the directory from which
`rapids_cython_add_rpath_entries` is called.
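A minimal sketch, assuming a hypothetical C++ target `example_cpp` and Cython
source `algo.pyx`:
.. code-block:: cmake
rapids_cython_create_modules(SOURCE_FILES algo.pyx
ASSOCIATED_TARGETS example_cpp)
# Each module associated with example_cpp gains an RPATH entry pointing
# at <project-root>/lib, expressed relative to $ORIGIN
rapids_cython_add_rpath_entries(TARGET example_cpp PATHS lib)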
#]=======================================================================]
function(rapids_cython_add_rpath_entries)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cython.add_rpath_entries")
set(options)
set(one_value ROOT_DIRECTORY TARGET)
set(multi_value PATHS)
cmake_parse_arguments(_RAPIDS_CYTHON "${options}" "${one_value}" "${multi_value}" ${ARGN})
# By default paths are relative to the current project root.
if(NOT _RAPIDS_CYTHON_ROOT_DIRECTORY)
set(_RAPIDS_CYTHON_ROOT_DIRECTORY "${PROJECT_SOURCE_DIR}")
endif()
# Transform all paths to paths relative to the current directory.
set(cleaned_paths)
cmake_path(ABSOLUTE_PATH _RAPIDS_CYTHON_ROOT_DIRECTORY)
foreach(path IN LISTS _RAPIDS_CYTHON_PATHS)
if(NOT IS_ABSOLUTE "${path}")
cmake_path(ABSOLUTE_PATH path BASE_DIRECTORY "${_RAPIDS_CYTHON_ROOT_DIRECTORY}")
endif()
list(APPEND cleaned_paths "${path}")
endforeach()
if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
set(platform_rpath_origin "@loader_path")
else()
set(platform_rpath_origin "$ORIGIN")
endif()
get_property(targets GLOBAL PROPERTY "rapids_cython_associations_${_RAPIDS_CYTHON_TARGET}")
foreach(target IN LISTS targets)
# Compute the path relative to the current target.
set(target_paths)
get_target_property(target_source_dir ${target} SOURCE_DIR)
foreach(target_path IN LISTS cleaned_paths)
cmake_path(RELATIVE_PATH target_path BASE_DIRECTORY "${target_source_dir}")
list(APPEND target_paths "${platform_rpath_origin}/${target_path}")
endforeach()
list(JOIN target_paths ";" target_paths)
set_property(TARGET ${target} APPEND PROPERTY INSTALL_RPATH "${target_paths}")
endforeach()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cython/init.cmake | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cython_init
------------------
.. versionadded:: v22.06.00
Perform standard initialization of any CMake build using scikit-build to create Python extension modules with Cython.
.. code-block:: cmake
rapids_cython_init()
.. note::
Use of the rapids-cython component of rapids-cmake requires scikit-build. The behavior of the functions provided by
this component is undefined if they are invoked outside of a build managed by scikit-build.
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`RAPIDS_CYTHON_INITIALIZED` will be set to TRUE.
:cmake:variable:`CYTHON_FLAGS` will be set to a standard set of flags to pass to the command line cython invocation.
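A typical invocation near the top of a scikit-build managed CMakeLists.txt:
.. code-block:: cmake
include(rapids-cython)
rapids_cython_init()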
#]=======================================================================]
macro(rapids_cython_init)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cython.init")
# Only initialize once.
if(NOT DEFINED RAPIDS_CYTHON_INITIALIZED)
# Verify that we are using scikit-build.
if(NOT DEFINED SKBUILD)
message(WARNING "rapids-cython expects scikit-build to be active before being used. \
The SKBUILD variable is not currently set, indicating that scikit-build \
is not active, so builds may behave unexpectedly.")
else()
# Access the variable to avoid unused variable warnings.
message(TRACE "Accessing SKBUILD variable ${SKBUILD}")
endif()
find_package(PythonExtensions REQUIRED)
find_package(Cython REQUIRED)
# Incorporate scikit-build patches.
include("${rapids-cmake-dir}/cython/detail/skbuild_patches.cmake")
if(NOT CYTHON_FLAGS)
set(CYTHON_FLAGS "--directive binding=True,embedsignature=True,always_allow_keywords=True")
endif()
# Flag
set(RAPIDS_CYTHON_INITIALIZED TRUE)
endif()
endmacro()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cython/create_modules.cmake | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cython_create_modules
----------------------------
.. versionadded:: v22.06.00
Generate C(++) from Cython and create Python modules.
.. code-block:: cmake
rapids_cython_create_modules([CXX] [SOURCE_FILES <src1> <src2> ...] [LINKED_LIBRARIES <lib1> <lib2> ... ] [INSTALL_DIR <install_path>] [MODULE_PREFIX <module_prefix>] )
Creates a Cython target for each provided source file, then adds a
corresponding Python extension module. Each built library has its RPATH set to
$ORIGIN.
.. note::
Requires :cmake:command:`rapids_cython_init` to be called before usage.
``CXX``
Flag indicating that the Cython files need to generate C++ rather than C.
``SOURCE_FILES``
The list of Cython source files to be built into Python extension modules.
Note that this function assumes that Cython source files have a one-one
correspondence with extension modules to build, i.e. for every `<Name>.pyx`
in SOURCE_FILES we assume that `<Name>.pyx` is a Cython source file for which
an extension module `<Name>` should be built.
``LINKED_LIBRARIES``
The list of libraries that need to be linked into all modules. In RAPIDS,
this list usually contains (at minimum) the corresponding C++ libraries.
``INSTALL_DIR``
The path relative to the installation prefix so that it can be converted to
an absolute path in a relocatable way. If not provided, defaults to the path
to CMAKE_CURRENT_SOURCE_DIR relative to PROJECT_SOURCE_DIR.
``MODULE_PREFIX``
A prefix used to name the generated library targets. This functionality is
useful when multiple Cython modules in different subpackages of the same
project have the same name. The default prefix is the empty string.
``ASSOCIATED_TARGETS``
A list of targets that are associated with the Cython targets created in this
function. The target<-->associated target mapping is stored and may be
leveraged by the following functions:
- :cmake:command:`rapids_cython_add_rpath_entries` accepts a path for an
associated target and updates the RPATH of each target with which that
associated target is associated.
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`RAPIDS_CYTHON_CREATED_TARGETS` will be set to a list of
targets created by this function.
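A minimal sketch, assuming hypothetical Cython sources and a pre-existing C++
target `example::example`:
.. code-block:: cmake
rapids_cython_init()
rapids_cython_create_modules(CXX
SOURCE_FILES algo.pyx util.pyx
LINKED_LIBRARIES example::example)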
#]=======================================================================]
function(rapids_cython_create_modules)
include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/detail/verify_init.cmake)
rapids_cython_verify_init()
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cython.create_modules")
set(_rapids_cython_options CXX)
set(_rapids_cython_one_value INSTALL_DIR MODULE_PREFIX)
set(_rapids_cython_multi_value SOURCE_FILES LINKED_LIBRARIES ASSOCIATED_TARGETS)
cmake_parse_arguments(_RAPIDS_CYTHON "${_rapids_cython_options}" "${_rapids_cython_one_value}"
"${_rapids_cython_multi_value}" ${ARGN})
set(language "C")
if(_RAPIDS_CYTHON_CXX)
set(language "CXX")
endif()
set(CREATED_TARGETS "")
if(NOT DEFINED _RAPIDS_CYTHON_MODULE_PREFIX)
set(_RAPIDS_CYTHON_MODULE_PREFIX "")
endif()
foreach(cython_filename IN LISTS _RAPIDS_CYTHON_SOURCE_FILES)
# Generate a reasonable module name.
cmake_path(GET cython_filename FILENAME cython_module)
cmake_path(REMOVE_EXTENSION cython_module)
# Save the name of the module without the provided prefix so that we can control the output.
set(cython_module_filename "${cython_module}")
string(PREPEND cython_module ${_RAPIDS_CYTHON_MODULE_PREFIX})
# Generate C++ from Cython and create a library for the resulting extension module to compile.
add_cython_target(${cython_module_filename} "${cython_filename}" ${language} PY3 OUTPUT_VAR
cythonized_file)
add_library(${cython_module} MODULE ${cythonized_file})
python_extension_module(${cython_module})
# The final library name must match the original filename and must ignore the prefix.
set_target_properties(${cython_module} PROPERTIES LIBRARY_OUTPUT_NAME ${cython_module_filename})
# Link the module to the requested libraries
if(DEFINED _RAPIDS_CYTHON_LINKED_LIBRARIES)
target_link_libraries(${cython_module} ${_RAPIDS_CYTHON_LINKED_LIBRARIES})
endif()
# Compute the install directory relative to the source and rely on installs being relative to
# the CMAKE_PREFIX_PATH for e.g. editable installs.
if(NOT DEFINED _RAPIDS_CYTHON_INSTALL_DIR)
cmake_path(RELATIVE_PATH CMAKE_CURRENT_SOURCE_DIR BASE_DIRECTORY "${PROJECT_SOURCE_DIR}"
OUTPUT_VARIABLE _RAPIDS_CYTHON_INSTALL_DIR)
endif()
install(TARGETS ${cython_module} DESTINATION ${_RAPIDS_CYTHON_INSTALL_DIR})
# Default the INSTALL_RPATH for all modules to $ORIGIN.
if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
set(platform_rpath_origin "@loader_path")
else()
set(platform_rpath_origin "$ORIGIN")
endif()
set_target_properties(${cython_module} PROPERTIES INSTALL_RPATH "${platform_rpath_origin}")
# Store any provided associated targets in a global list
foreach(associated_target IN LISTS _RAPIDS_CYTHON_ASSOCIATED_TARGETS)
set_property(GLOBAL PROPERTY "rapids_cython_associations_${associated_target}"
"${cython_module}" APPEND)
endforeach()
list(APPEND CREATED_TARGETS "${cython_module}")
endforeach()
set(RAPIDS_CYTHON_CREATED_TARGETS ${CREATED_TARGETS} PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cython | rapidsai_public_repos/rapids-cmake/rapids-cmake/cython/detail/verify_init.cmake | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cython_verify_init
-------------------------
.. versionadded:: v22.06.00
Simple helper function for rapids-cython components to verify that rapids_cython_init has been called before they proceed.
.. code-block:: cmake
rapids_cython_verify_init()
#]=======================================================================]
function(rapids_cython_verify_init)
if(NOT DEFINED RAPIDS_CYTHON_INITIALIZED)
message(FATAL_ERROR "You must call rapids_cython_init before calling this function")
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cython | rapidsai_public_repos/rapids-cmake/rapids-cmake/cython/detail/skbuild_patches.cmake | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
_set_python_extension_symbol_visibility
---------------------------------------
.. versionadded:: v22.06.00
The original version of this function in scikit-build runs a linker script to
modify the visibility of symbols. This version is a patch to avoid overwriting
the visibility of symbols because otherwise any library that exports symbols
with external linkage will have the visibility of those symbols changed
undesirably. We can remove this function once this issue is resolved in
scikit-build.
Issue: https://github.com/scikit-build/scikit-build/issues/668
PR: https://github.com/scikit-build/scikit-build/pull/703
#]=======================================================================]
function(_set_python_extension_symbol_visibility _target)
include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/verify_init.cmake)
rapids_cython_verify_init()
if(PYTHON_VERSION_MAJOR VERSION_GREATER 2)
set(_modinit_prefix "PyInit_")
else()
set(_modinit_prefix "init")
endif()
message("_modinit_prefix:${_modinit_prefix}")
if("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC")
set_target_properties(${_target} PROPERTIES LINK_FLAGS "/EXPORT:${_modinit_prefix}${_target}")
elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU" AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(_script_path ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${_target}-version-script.map)
file(WRITE ${_script_path} # Note: The change is to this script, which does not indiscriminately
# mark all non PyInit symbols as local.
"{global: ${_modinit_prefix}${_target}; };")
set_property(TARGET ${_target} APPEND_STRING
PROPERTY LINK_FLAGS " -Wl,--version-script=\"${_script_path}\"")
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/write_version_file.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cmake_write_version_file
-------------------------------
.. versionadded:: v21.08.00
Generate a C++ header file that hold the version (`X.Y.Z`) information of the calling project.
.. code-block:: cmake
rapids_cmake_write_version_file(file_path [PREFIX <prefix>])
The file generated by :cmake:command:`rapids_cmake_write_version_file` holds the separate components
of the `X.Y.Z` version string set by the CMake :cmake:command:`project <cmake:command:project>` call
as C++ defines.
``PREFIX``
Prefix for all the C++ macros. By default, if not explicitly specified, it will be equal
to the project's name ( CMake variable :cmake:variable:`PROJECT_NAME <cmake:variable:PROJECT_NAME>` ).
The generated file will unconditionally contain the following defines:
- #define <PREFIX>_VERSION_MAJOR # CMake's PROJECT_VERSION_MAJOR (X)
- #define <PREFIX>_VERSION_MINOR # CMake's PROJECT_VERSION_MINOR (Y)
- #define <PREFIX>_VERSION_PATCH # CMake's PROJECT_VERSION_PATCH (Z)
Each of the components will have all leading zeroes removed as we presume
all components of the version can be represented as decimal values.
.. note::
If a component doesn't exist, zero will be used as a placeholder value.
For example, with version 2.4 the PATCH value will become 0.
``file_path``
Either an absolute or relative path.
When a relative path, the absolute location will be computed from
:cmake:variable:`CMAKE_CURRENT_BINARY_DIR <cmake:variable:CMAKE_CURRENT_BINARY_DIR>`
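Example on how to use :cmake:command:`rapids_cmake_write_version_file`; the
project name and path are illustrative:
.. code-block:: cmake
project(example VERSION 21.08.00)
# Writes example_VERSION_MAJOR (21), example_VERSION_MINOR (8), and
# example_VERSION_PATCH (0) with the leading zero removed
rapids_cmake_write_version_file(include/example/version_config.hpp)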
#]=======================================================================]
function(rapids_cmake_write_version_file file_path)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cmake.write_version_file")
set(options "")
set(one_value PREFIX)
set(multi_value "")
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
cmake_path(IS_RELATIVE file_path is_relative)
if(is_relative)
cmake_path(APPEND CMAKE_CURRENT_BINARY_DIR ${file_path} OUTPUT_VARIABLE output_path)
else()
set(output_path "${file_path}")
endif()
if(NOT _RAPIDS_PREFIX)
set(_RAPIDS_PREFIX "${PROJECT_NAME}")
endif()
if(PROJECT_VERSION_MAJOR)
math(EXPR _RAPIDS_WRITE_MAJOR "${PROJECT_VERSION_MAJOR} + 0" OUTPUT_FORMAT DECIMAL)
else()
set(_RAPIDS_WRITE_MAJOR 0)
endif()
if(PROJECT_VERSION_MINOR)
math(EXPR _RAPIDS_WRITE_MINOR "${PROJECT_VERSION_MINOR} + 0" OUTPUT_FORMAT DECIMAL)
else()
set(_RAPIDS_WRITE_MINOR 0)
endif()
if(PROJECT_VERSION_PATCH)
math(EXPR _RAPIDS_WRITE_PATCH "${PROJECT_VERSION_PATCH} + 0" OUTPUT_FORMAT DECIMAL)
else()
set(_RAPIDS_WRITE_PATCH 0)
endif()
configure_file("${CMAKE_CURRENT_FUNCTION_LIST_DIR}/template/version.hpp.in" "${output_path}"
@ONLY)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/install_lib_dir.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cmake_install_lib_dir
------------------------------
.. versionadded:: v21.10.00
Establish a variable that holds the library installation directory.
.. code-block:: cmake
rapids_cmake_install_lib_dir( out_variable_name [MODIFY_INSTALL_LIBDIR] )
Establishes a variable that holds the correct library installation directory
( lib or lib64 or lib/<multiarch-tuple> ). This function is CONDA aware and
will return `lib` when it detects a project is installing in the CONDA_PREFIX
Also offers the ability to modify :cmake:command:`CMAKE_INSTALL_LIBDIR <cmake:command:install>` to
be the computed installation directory.
Result Variables
^^^^^^^^^^^^^^^^
:cmake:command:`CMAKE_INSTALL_LIBDIR <cmake:command:install>` will be modified to be the computed relative directory
(lib or lib64 or lib/<multiarch-tuple>) when `MODIFY_INSTALL_LIBDIR` is provided
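A sketch of the common install pattern; the target name is illustrative:
.. code-block:: cmake
rapids_cmake_install_lib_dir(lib_dir)
install(TARGETS example DESTINATION ${lib_dir})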
#]=======================================================================]
function(rapids_cmake_install_lib_dir out_variable_name)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cmake.install_lib_dir")
set(modify_install_libdir FALSE)
if(ARGV1 STREQUAL "MODIFY_INSTALL_LIBDIR")
set(modify_install_libdir TRUE)
endif()
set(install_prefix "${CMAKE_INSTALL_PREFIX}")
cmake_path(ABSOLUTE_PATH install_prefix NORMALIZE)
set(use_conda_lib_dir FALSE)
set(computed_path)
# We need to defer to GNUInstallDirs but not allow it to set CMAKE_INSTALL_LIBDIR
set(remove_install_dir TRUE)
if(DEFINED CMAKE_INSTALL_LIBDIR)
set(remove_install_dir FALSE)
endif()
include(GNUInstallDirs)
set(computed_path "${CMAKE_INSTALL_LIBDIR}")
if(modify_install_libdir)
# GNUInstallDirs will have set `CMAKE_INSTALL_LIBDIR` as a cache path so we only need to make
# sure our path overrides any local variable
set(CMAKE_INSTALL_LIBDIR ${computed_path} PARENT_SCOPE)
endif()
if(remove_install_dir)
unset(CMAKE_INSTALL_LIBDIR CACHE)
endif()
set(${out_variable_name} ${computed_path} PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/parse_version.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cmake_parse_version
--------------------------
.. versionadded:: v21.06.00
Extract components of a `X.Y.Z` or `X.Y` version string in a consistent manner
.. code-block:: cmake
rapids_cmake_parse_version( [MAJOR|MINOR|PATCH|MAJOR_MINOR] version out_variable_name)
Offers the ability to extract components of any 2 or 3 component version string without
having to write complex regular expressions.
``MAJOR``
Extract the first component (`X`) from `version` and place it in the variable
named in `out_variable_name`
``MINOR``
Extract the second component (`Y`) from `version` and place it in the variable
named in `out_variable_name`
``PATCH``
Extract the third component (`Z`) from `version` and place it in the variable
named in `out_variable_name`. If no `Z` component exists for `version` nothing
will happen.
``MAJOR_MINOR``
Extract the first and second component (`X.Y`) from `version` and place it in the variable
named in `out_variable_name` using the pattern `X.Y`.
Example on how to properly use :cmake:command:`rapids_cmake_parse_version`:
.. code-block:: cmake
project(Example VERSION 43.01.0)
rapids_cmake_parse_version(MAJOR_MINOR ${PROJECT_VERSION} major_minor)
message(STATUS "The major.minor version is: ${major_minor}")
Result Variables
^^^^^^^^^^^^^^^^
The variable `out_variable_name` will be created/modified only when the version extraction
is successful
#]=======================================================================]
function(rapids_cmake_parse_version mode version_value out_variable_name)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cmake.parse_version")
# Normalize the requested mode for case-insensitive matching
string(TOUPPER ${mode} mode)
string(REPLACE "." ";" version_as_list "${version_value}")
list(LENGTH version_as_list len)
# Extract each component and make sure they aren't empty before setting. Enforces the rule that a
# value/character must exist between each `.`
if(mode STREQUAL "MAJOR" AND len GREATER_EQUAL 1)
list(GET version_as_list 0 extracted_component)
if(NOT extracted_component STREQUAL "")
set(${out_variable_name} ${extracted_component} PARENT_SCOPE)
endif()
elseif(mode STREQUAL "MINOR" AND len GREATER_EQUAL 2)
list(GET version_as_list 1 extracted_component)
if(NOT extracted_component STREQUAL "")
set(${out_variable_name} ${extracted_component} PARENT_SCOPE)
endif()
elseif(mode STREQUAL "PATCH" AND len GREATER_EQUAL 3)
list(GET version_as_list 2 extracted_component)
if(NOT extracted_component STREQUAL "")
set(${out_variable_name} ${extracted_component} PARENT_SCOPE)
endif()
elseif(mode STREQUAL "MAJOR_MINOR" AND len GREATER_EQUAL 2)
list(GET version_as_list 0 extracted_major)
list(GET version_as_list 1 extracted_minor)
if(NOT extracted_major STREQUAL "" AND NOT extracted_minor STREQUAL "")
set(${out_variable_name} "${extracted_major}.${extracted_minor}" PARENT_SCOPE)
endif()
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/make_global.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cmake_make_global
------------------------
.. versionadded:: v21.06.00
Make sure all provided targets have global visibility no matter how they
are constructed.
.. code-block:: cmake
rapids_cmake_make_global(target_var)
CMake targets have visibility or scope where they can be referenced by name.
Any built-in target such as those created by :cmake:command:`add_library <cmake:command:add_library>` have
global visibility. Targets created with :cmake:command:`add_library(IMPORTED) <cmake:command:add_library>` by
default have directory visibility. This causes problems when trying to reason
about targets created by `CPM`, as they could be either of the above.
This function promotes the set of targets provided to have global visibility.
This makes it easier for users to reason about when/where they can reference
the targets.
``target_var``
Holds the variable that lists all targets that should be promoted to
GLOBAL scope
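A minimal sketch; note the function takes the name of the variable holding
the target list, not the list itself:
.. code-block:: cmake
set(targets_to_promote example::example)
rapids_cmake_make_global(targets_to_promote)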
#]=======================================================================]
function(rapids_cmake_make_global target_var)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cmake.make_global")
foreach(target IN LISTS ${target_var})
if(TARGET ${target})
get_target_property(aliased_target ${target} ALIASED_TARGET)
if(aliased_target)
continue()
endif()
get_target_property(is_imported ${target} IMPORTED)
get_target_property(already_global ${target} IMPORTED_GLOBAL)
if(is_imported AND NOT already_global)
set_target_properties(${target} PROPERTIES IMPORTED_GLOBAL TRUE)
endif()
endif()
endforeach()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/build_type.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cmake_build_type
-----------------------
.. versionadded:: v21.06.00
Establish the :cmake:variable:`CMAKE_BUILD_TYPE <cmake:variable:CMAKE_BUILD_TYPE>` default value.
.. code-block:: cmake
rapids_cmake_build_type(default_type)
If the generator is `Ninja` or `Makefile` the :cmake:variable:`CMAKE_BUILD_TYPE <cmake:variable:CMAKE_BUILD_TYPE>`
variable will be established if not explicitly set by the user either by
the env variable `CMAKE_BUILD_TYPE` or by passing `-DCMAKE_BUILD_TYPE=`. This removes
situations where the `No-Config` / `Empty` build type is used.
``default_type``
The default build type to use if one doesn't already exist
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`CMAKE_BUILD_TYPE <cmake:variable:CMAKE_BUILD_TYPE>` will be set to ``default_type`` if not already set
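Example
^^^^^^^
A short sketch of typical usage near the top of a project; the project name
is illustrative:
.. code-block:: cmake
  project(example LANGUAGES CXX)
  rapids_cmake_build_type(Release)
  # CMAKE_BUILD_TYPE now defaults to Release unless the user chose otherwise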
#]=======================================================================]
function(rapids_cmake_build_type default_type)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cmake.build_type")
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
message(VERBOSE "Setting build type to '${default_type}' since none specified.")
set(CMAKE_BUILD_TYPE "${default_type}" CACHE STRING "Choose the type of build." FORCE)
# Set the possible values of build type for cmake-gui
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel"
"RelWithDebInfo")
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/support_conda_env.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cmake_support_conda_env
------------------------------
.. versionadded:: v21.06.00
Establish a target that holds the CONDA include and link directories.
.. code-block:: cmake
rapids_cmake_support_conda_env( <target_name> [MODIFY_PREFIX_PATH] )
Creates a global interface target called `target_name` that holds
the CONDA include and link directories.
Also offers the ability to modify :cmake:variable:`CMAKE_PREFIX_PATH <cmake:variable:CMAKE_PREFIX_PATH>` to
include the following paths based on the current conda environment:
- `PREFIX`
- `BUILD_PREFIX`
- `CONDA_PREFIX`
.. versionadded:: v23.08.00
- `PREFIX`/targets/<cuda_target_platform>/
``MODIFY_PREFIX_PATH``
  When in a conda build environment the contents of `$ENV{PREFIX}`,
  `$ENV{PREFIX}/targets/<cuda_target_platform>/`, and `$ENV{BUILD_PREFIX}` will be inserted at the
  front of :cmake:variable:`CMAKE_PREFIX_PATH <cmake:variable:CMAKE_PREFIX_PATH>`.
  When in a conda environment the contents of `$ENV{CONDA_PREFIX}` will be inserted at
  the front of :cmake:variable:`CMAKE_PREFIX_PATH <cmake:variable:CMAKE_PREFIX_PATH>`.
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`CMAKE_PREFIX_PATH <cmake:variable:CMAKE_PREFIX_PATH>` will be modified when `MODIFY_PREFIX_PATH` is provided
and called from a conda environment.
Result Targets
^^^^^^^^^^^^^^^^
`target_name` target will be created only if called from a conda environment.
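Example
^^^^^^^
A minimal sketch; the ``conda_env`` target name and the ``example_lib``
library are illustrative:
.. code-block:: cmake
  rapids_cmake_support_conda_env(conda_env MODIFY_PREFIX_PATH)
  if(TARGET conda_env)
    target_link_libraries(example_lib PRIVATE conda_env)
  endif()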
#]=======================================================================]
# cmake-lint: disable=R0912,R0915,W0106
function(rapids_cmake_support_conda_env target)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cmake.support_conda_env")
  # If the target already exists, terminate early
if(TARGET ${target})
return()
endif()
if("$ENV{CONDA_BUILD}" STREQUAL "1")
set(in_conda_build True)
elseif(DEFINED ENV{CONDA_PREFIX})
set(in_conda_prefix True)
endif()
if(in_conda_build OR in_conda_prefix)
# comment needed here due to cmake-lint bug
macro(modify_cmake_prefix_path_global)
cmake_parse_arguments(_RAPIDS "" "" "PATHS" ${ARGN})
if(DEFINED ENV{CMAKE_PREFIX_PATH})
# If both CMAKE_PREFIX_PATH cmake and environment variables are populated, ensure the
# environment variable's paths are preserved in the cmake variable
cmake_path(CONVERT "$ENV{CMAKE_PREFIX_PATH}" TO_CMAKE_PATH_LIST _paths NORMALIZE)
list(PREPEND _RAPIDS_PATHS ${_paths})
endif()
list(APPEND CMAKE_PREFIX_PATH ${_RAPIDS_PATHS})
list(REMOVE_DUPLICATES CMAKE_PREFIX_PATH)
set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} PARENT_SCOPE)
message(VERBOSE "CMAKE_PREFIX_PATH set to: ${CMAKE_PREFIX_PATH}")
endmacro()
# comment needed here due to cmake-lint bug
macro(modify_cmake_prefix_path_envvar)
cmake_parse_arguments(_RAPIDS "" "" "PATHS" ${ARGN})
cmake_path(CONVERT "$ENV{CMAKE_PREFIX_PATH}" TO_CMAKE_PATH_LIST _paths NORMALIZE)
list(APPEND _paths ${_RAPIDS_PATHS})
list(REMOVE_DUPLICATES _paths)
cmake_path(CONVERT "${_paths}" TO_NATIVE_PATH_LIST _paths NORMALIZE)
# cmake-lint: disable=W0106
set(ENV{CMAKE_PREFIX_PATH} ${_paths})
# cmake-lint: disable=W0106
message(VERBOSE "ENV{CMAKE_PREFIX_PATH} set to: $ENV{CMAKE_PREFIX_PATH}")
endmacro()
# comment needed here due to cmake-lint bug
macro(modify_cmake_prefix_path)
if(DEFINED CMAKE_PREFIX_PATH)
modify_cmake_prefix_path_global(${ARGN})
else()
modify_cmake_prefix_path_envvar(${ARGN})
endif()
endmacro()
if(ARGV1 STREQUAL "MODIFY_PREFIX_PATH")
set(modify_prefix_path TRUE)
endif()
add_library(${target} INTERFACE)
if(in_conda_build)
# For conda-build we add the host conda environment prefix to the cmake search paths so that
# raw `find_file` or `find_library` calls will find CUDA components in the host environment
set(target_platform $ENV{cross_target_platform}) # when target != cross_target
if(NOT target_platform)
set(target_platform $ENV{target_platform})
endif()
if("${target_platform}" STREQUAL "linux-64")
set(targetsDir "targets/x86_64-linux")
elseif("${target_platform}" STREQUAL "linux-ppc64le")
set(targetsDir "targets/ppc64le-linux")
elseif("${target_platform}" STREQUAL "linux-aarch64")
set(targetsDir "targets/sbsa-linux")
endif()
target_include_directories(${target} INTERFACE "$ENV{PREFIX}/include"
"$ENV{BUILD_PREFIX}/include")
target_link_directories(${target} INTERFACE "$ENV{PREFIX}/lib" "$ENV{BUILD_PREFIX}/lib")
if(DEFINED CMAKE_SHARED_LIBRARY_RPATH_LINK_CUDA_FLAG
OR DEFINED CMAKE_SHARED_LIBRARY_RPATH_LINK_CXX_FLAG)
if(DEFINED targetsDir)
target_link_options(${target} INTERFACE
"$<HOST_LINK:SHELL:LINKER:-rpath-link=$ENV{PREFIX}/${targetsDir}/lib>"
)
endif()
target_link_options(${target} INTERFACE
"$<HOST_LINK:SHELL:LINKER:-rpath-link=$ENV{PREFIX}/lib>")
target_link_options(${target} INTERFACE
"$<HOST_LINK:SHELL:LINKER:-rpath-link=$ENV{BUILD_PREFIX}/lib>")
endif()
if(modify_prefix_path)
message(VERBOSE "Conda build detected")
set(prefix_paths "$ENV{PREFIX}" "$ENV{BUILD_PREFIX}")
if(DEFINED targetsDir)
list(PREPEND prefix_paths "$ENV{PREFIX}/${targetsDir}")
endif()
modify_cmake_prefix_path(PATHS ${prefix_paths})
endif()
elseif(in_conda_prefix)
target_include_directories(${target} INTERFACE "$ENV{CONDA_PREFIX}/include")
target_link_directories(${target} INTERFACE "$ENV{CONDA_PREFIX}/lib")
if(DEFINED CMAKE_SHARED_LIBRARY_RPATH_LINK_CUDA_FLAG
OR DEFINED CMAKE_SHARED_LIBRARY_RPATH_LINK_CXX_FLAG)
target_link_options(${target} INTERFACE
"$<HOST_LINK:SHELL:LINKER:-rpath-link=$ENV{CONDA_PREFIX}/lib>")
endif()
if(modify_prefix_path)
message(VERBOSE "Conda environment detected")
modify_cmake_prefix_path(PATHS "$ENV{CONDA_PREFIX}")
endif()
endif()
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/write_git_revision_file.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cmake_write_git_revision_file
------------------------------------
.. versionadded:: v21.10.00
Generate a C++ header file that holds git revision information of the calling project.
.. code-block:: cmake
rapids_cmake_write_git_revision_file(<target_name> file_path [PREFIX <prefix>])
Creates a global interface target called `target_name` that holds the includes to
the generated header with the macros for git branch, sha1, version, and if any uncommitted
changes exist. Users of the header file must use
:cmake:command:`target_link_libraries <cmake:command:target_link_libraries>` to the target
so that the header is generated before it is used.
``PREFIX``
Prefix for all the C++ macros. By default if not explicitly specified it will be equal
to the projects name ( CMake variable `PROJECT_NAME` ).
This information will be recorded in the following defines:
- <PREFIX>_GIT_BRANCH
Will store the current git branch name, otherwise when in a detached HEAD state will
store `HEAD`.
- <PREFIX>_GIT_SHA1
Will store the full SHA1 for the current git commit if one exists.
- <PREFIX>_GIT_IS_DIRTY
Will exist if any git tracked file has any modifications that aren't committed ( dirty ).
- <PREFIX>_GIT_VERSION
Will store `<tag>[-<distance>-g<sha1>[-dirty]]` computed from running
`git describe --tags --dirty --always`. For example "v21.10.00a-18-g7efb04f-dirty" indicates
that the latest commit is "7efb04f" but has uncommitted changes (`-dirty`), and
that we are "18" commits after tag "v21.10.00a".
``file_path``
Either an absolute or relative path.
When a relative path, the absolute location will be computed from
:cmake:variable:`CMAKE_CURRENT_BINARY_DIR <cmake:variable:CMAKE_CURRENT_BINARY_DIR>`
.. note::
If `git` doesn't exist or the project doesn't use `git`, the header
will still be written. The branch, sha1, and version defines will be set to
`unknown` and the project won't be considered dirty.
Result Targets
^^^^^^^^^^^^^^^^
`target_name` target will be created. Consuming libraries/executables
of the generated header must use the target via
:cmake:command:`target_link_libraries <cmake:command:target_link_libraries>`
for correct builds.
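Example
^^^^^^^
A minimal sketch; the target, file, and prefix names are illustrative:
.. code-block:: cmake
  rapids_cmake_write_git_revision_file(demo_git_version demo_git_version.hpp PREFIX DEMO)
  add_library(demo demo.cpp)
  target_link_libraries(demo PRIVATE demo_git_version)
  # demo.cpp can now include demo_git_version.hpp and use DEMO_GIT_SHA1 etc.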
#]=======================================================================]
function(rapids_cmake_write_git_revision_file target file_path)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cmake.write_git_revision_file")
set(options "")
set(one_value PREFIX)
set(multi_value "")
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
cmake_path(IS_RELATIVE file_path is_relative)
if(is_relative)
cmake_path(APPEND CMAKE_CURRENT_BINARY_DIR ${file_path} OUTPUT_VARIABLE output_path)
else()
set(output_path "${file_path}")
endif()
if(NOT _RAPIDS_PREFIX)
set(_RAPIDS_PREFIX "${PROJECT_NAME}")
endif()
# Find Git
find_package(Git QUIET)
  # Use the computed absolute path so that relative file paths resolve against
  # the build tree instead of the source tree
  add_custom_target(${target}_compute_git_info ALL
                    BYPRODUCTS "${output_path}"
                    COMMENT "Generate git revision file for ${target}"
                    COMMAND ${CMAKE_COMMAND} -DWORKING_DIRECTORY=${CMAKE_CURRENT_SOURCE_DIR}
                            -DGIT_EXECUTABLE=${GIT_EXECUTABLE}
                            -D_RAPIDS_GIT_PREFIX=${_RAPIDS_PREFIX}
                            -DTEMPLATE_FILE=${CMAKE_CURRENT_FUNCTION_LIST_DIR}/template/git_revision.hpp.in
                            -DFILE_TO_WRITE=${output_path} -P
                            ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/detail/compute_git_info.cmake
                    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
  # Generate a target that depends on compute_git_info. This is what other targets will use to
  # get the build path, and it makes sure that we have correct parallel builds.
add_library(${target} INTERFACE)
add_dependencies(${target} ${target}_compute_git_info)
  cmake_path(GET output_path PARENT_PATH file_path_dir)
target_include_directories(${target} INTERFACE "$<BUILD_INTERFACE:${file_path_dir}>")
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/detail/policy.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cmake_policy
-------------------
.. versionadded:: v23.02.00
Prints rapids-cmake deprecated warnings
.. code-block:: cmake
rapids_cmake_policy( DEPRECATED_IN <version> REMOVED_IN <version> MESSAGE <content>)
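For example, a deprecated rapids-cmake entry point might report (a sketch; the
versions and message are illustrative):
.. code-block:: cmake
  rapids_cmake_policy(DEPRECATED_IN 23.02 REMOVED_IN 23.06
                      MESSAGE "Use rapids_cpm_find instead.")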
#]=======================================================================]
function(rapids_cmake_policy)
set(options "")
set(one_value DEPRECATED_IN REMOVED_IN MESSAGE)
set(multi_value "")
cmake_parse_arguments(_RAPIDS_POLICY "${options}" "${one_value}" "${multi_value}" ${ARGN})
if(NOT DEFINED rapids-cmake-version)
include("${rapids-cmake-dir}/rapids-version.cmake")
endif()
set(_RAPIDS_POLICY_CALLERS_VERSION ${rapids-cmake-version})
set(policy_context_text
"rapids-cmake policy [deprecated=${_RAPIDS_POLICY_DEPRECATED_IN} removed=${_RAPIDS_POLICY_REMOVED_IN}]:"
)
set(policy_mode DEPRECATION)
message(STATUS "_RAPIDS_POLICY_CALLERS_VERSION: ${_RAPIDS_POLICY_CALLERS_VERSION}")
message(STATUS "_RAPIDS_POLICY_REMOVED_IN: ${_RAPIDS_POLICY_REMOVED_IN}")
if(_RAPIDS_POLICY_CALLERS_VERSION VERSION_GREATER_EQUAL ${_RAPIDS_POLICY_REMOVED_IN})
set(policy_mode FATAL_ERROR)
endif()
set(policy_upgrade_text "")
if(_RAPIDS_POLICY_CALLERS_VERSION VERSION_LESS ${_RAPIDS_POLICY_DEPRECATED_IN})
set(policy_upgrade_text
"You are currently requesting rapids-cmake ${_RAPIDS_POLICY_CALLERS_VERSION} please upgrade to ${_RAPIDS_POLICY_DEPRECATED_IN}."
)
endif()
message(${policy_mode} "${policy_context_text} ${_RAPIDS_POLICY_MESSAGE} ${policy_upgrade_text}")
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/detail/compute_git_info.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
if(GIT_EXECUTABLE AND EXISTS "${GIT_EXECUTABLE}")
execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse HEAD
WORKING_DIRECTORY ${WORKING_DIRECTORY}
ERROR_QUIET
OUTPUT_VARIABLE _RAPIDS_WRITE_SHA1
OUTPUT_STRIP_TRAILING_WHITESPACE # need to strip off any newline
)
execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref HEAD
WORKING_DIRECTORY ${WORKING_DIRECTORY}
ERROR_QUIET
OUTPUT_VARIABLE _RAPIDS_WRITE_BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE # need to strip off any newline
)
  execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --dirty --always
WORKING_DIRECTORY ${WORKING_DIRECTORY}
ERROR_QUIET
OUTPUT_VARIABLE _RAPIDS_WRITE_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE # need to strip off any newline
)
endif()
if(NOT _RAPIDS_WRITE_SHA1)
set(_RAPIDS_WRITE_SHA1 "unknown")
endif()
if(NOT _RAPIDS_WRITE_BRANCH)
set(_RAPIDS_WRITE_BRANCH "unknown")
endif()
if(NOT _RAPIDS_WRITE_VERSION)
set(_RAPIDS_WRITE_VERSION "unknown")
endif()
set(_RAPIDS_GIT_IS_DIRTY 0)
if(_RAPIDS_WRITE_VERSION MATCHES dirty)
set(_RAPIDS_GIT_IS_DIRTY 1)
endif()
configure_file("${TEMPLATE_FILE}" "${FILE_TO_WRITE}" @ONLY)
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/template/version.hpp.in | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define @_RAPIDS_PREFIX@_VERSION_MAJOR @_RAPIDS_WRITE_MAJOR@
#define @_RAPIDS_PREFIX@_VERSION_MINOR @_RAPIDS_WRITE_MINOR@
#define @_RAPIDS_PREFIX@_VERSION_PATCH @_RAPIDS_WRITE_PATCH@
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cmake/template/git_revision.hpp.in | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define @_RAPIDS_GIT_PREFIX@_GIT_BRANCH "@_RAPIDS_WRITE_BRANCH@"
#define @_RAPIDS_GIT_PREFIX@_GIT_SHA1 "@_RAPIDS_WRITE_SHA1@"
#define @_RAPIDS_GIT_PREFIX@_GIT_VERSION "@_RAPIDS_WRITE_VERSION@"
#if (@_RAPIDS_GIT_IS_DIRTY@) //
# define @_RAPIDS_GIT_PREFIX@_GIT_IS_DIRTY
#endif
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/find.cmake | #=============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_find
---------------
.. versionadded:: v21.06.00
Allow projects to find or build arbitrary projects via `CPM` with built-in
tracking of these dependencies for correct export support.
.. code-block:: cmake
rapids_cpm_find(<PackageName> <version>
[COMPONENTS <components...>]
[GLOBAL_TARGETS <targets...>]
[BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
<CPM_ARGS>
all normal CPM options
)
Generate a CPM FindPackage call and associate this with the listed
build and install export set for correct export generation.
Since the visibility of CMake's targets differ between targets built locally and those
imported, :cmake:command:`rapids_cpm_find` promotes imported targets to be global so users have
consistency. List all targets used by your project in `GLOBAL_TARGETS`.
.. note::
Requires :cmake:command:`rapids_cpm_init` to be called before usage
``PackageName``
Name of the package to find.
``version``
Version of the package you would like CPM to find.
``COMPONENTS``
.. versionadded:: v22.10.00
A list of required components that are required to be found for this
package to be considered valid when doing a local search.
``GLOBAL_TARGETS``
Which targets from this package should be made global. This information
will be propagated to any associated export set.
.. versionchanged:: v21.10.00
  If any targets listed in `GLOBAL_TARGETS` exist when :cmake:command:`rapids_cpm_find` is called
no calls to `CPM` will be executed. This is done for the following reasons:
- Removes the need for the calling code to do the conditional checks
  - Allows `BUILD_EXPORT_SET` and `INSTALL_EXPORT_SET` tracking to happen correctly when targets have already been brought in by non-CPM means.
``BUILD_EXPORT_SET``
Record that a :cmake:command:`CPMFindPackage(<PackageName> ...)` call needs to occur as part of
our build directory export set.
``INSTALL_EXPORT_SET``
Record a :cmake:command:`find_dependency(<PackageName> ...) <cmake:module:CMakeFindDependencyMacro>` call needs to occur as part of
our install directory export set.
``CPM_ARGS``
Required placeholder to be provided before any extra arguments that need to
be passed down to :cmake:command:`CPMFindPackage`.
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`<PackageName>_SOURCE_DIR` is set to the path to the source directory of <PackageName>.
:cmake:variable:`<PackageName>_BINARY_DIR` is set to the path to the build directory of <PackageName>.
:cmake:variable:`<PackageName>_ADDED` is set to a true value if <PackageName> has not been added before.
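For example, a project can use these variables to react to the package being
built from source (a sketch; the package choice is illustrative):
.. code-block:: cmake
  rapids_cpm_find(fmt 8.0.1
                  CPM_ARGS
                  GITHUB_REPOSITORY fmtlib/fmt
                  GIT_TAG 8.0.1)
  if(fmt_ADDED)
    message(STATUS "fmt was built from source at ${fmt_SOURCE_DIR}")
  endif()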
.. note::
Adding an export set to :cmake:command:`rapids_cpm_find` has different behavior
  for build and install. Build exports a respective CPM call, since
  we presume other CPM packages don't generate a correct build directory
  config module, while install exports a `find_dependency` call as
  we expect projects to have a valid install setup.
If you need different behavior you will need to use :cmake:command:`rapids_export_package()`
or :cmake:command:`rapids_export_cpm()`.
If :cmake:variable:`CPM_<PackageName>_SOURCE` is set, we use :cmake:command:`CPMAddPackage` instead of
:cmake:command:`CPMFindPackage`. :cmake:command:`CPMAddPackage` always adds the package at the desired
:cmake:variable:`CPM_<PackageName>_SOURCE` location, and won't attempt to locate it via
:cmake:command:`find_package() <cmake:command:find_package>` first.
Examples
^^^^^^^^
Example on how to use :cmake:command:`rapids_cpm_find` to include common projects
.. code-block:: cmake
# fmt
rapids_cpm_find(fmt 8.0.1
GLOBAL_TARGETS fmt::fmt
CPM_ARGS
GITHUB_REPOSITORY fmtlib/fmt
GIT_TAG 8.0.1
GIT_SHALLOW TRUE
)
# google benchmark, no GIT_TAG required since it uses `v<Version>` tags
rapids_cpm_find(benchmark 1.5.2
CPM_ARGS
GIT_REPOSITORY https://github.com/google/benchmark.git
GIT_SHALLOW TRUE
OPTIONS "BENCHMARK_ENABLE_TESTING OFF"
"BENCHMARK_ENABLE_INSTALL OFF"
)
Overriding
^^^^^^^^^^
The :cmake:command:`rapids_cpm_package_override` command provides a way
for projects to override the default values for any :cmake:command:`rapids_cpm_find`, `rapids_cpm_* <../api.html#cpm-pre-configured-packages>`__,
`CPM <https://github.com/cpm-cmake/CPM.cmake>`_, and :cmake:module:`FetchContent() <cmake:module:FetchContent>` package.
By default when an override for a project is provided no local search
for that project will occur. This is done to make sure that the requested
modified version is used.
#]=======================================================================]
# cmake-lint: disable=R0912,R0915
function(rapids_cpm_find name version)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.find")
set(options CPM_ARGS)
set(one_value BUILD_EXPORT_SET INSTALL_EXPORT_SET)
set(multi_value COMPONENTS GLOBAL_TARGETS)
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
if(NOT DEFINED _RAPIDS_CPM_ARGS)
message(FATAL_ERROR "rapids_cpm_find requires you to specify CPM_ARGS before any CPM arguments")
endif()
set(package_needs_to_be_added TRUE)
if(_RAPIDS_GLOBAL_TARGETS)
foreach(target IN LISTS _RAPIDS_GLOBAL_TARGETS)
if(TARGET ${target})
set(package_needs_to_be_added FALSE)
break()
endif()
endforeach()
endif()
if(_RAPIDS_COMPONENTS)
# We need to pass the set of components as a space separated string and not a list
string(REPLACE ";" " " _RAPIDS_COMPONENTS "${_RAPIDS_COMPONENTS}")
list(APPEND _RAPIDS_UNPARSED_ARGUMENTS "FIND_PACKAGE_ARGUMENTS"
"COMPONENTS ${_RAPIDS_COMPONENTS}")
endif()
if(package_needs_to_be_added)
if(CPM_${name}_SOURCE)
CPMAddPackage(NAME ${name} VERSION ${version} ${_RAPIDS_UNPARSED_ARGUMENTS})
else()
CPMFindPackage(NAME ${name} VERSION ${version} ${_RAPIDS_UNPARSED_ARGUMENTS})
endif()
else()
# Restore any CPM variables that might be cached
cpm_check_if_package_already_added(${name} ${version})
if(CPM_PACKAGE_ALREADY_ADDED)
cpm_export_variables(${name})
endif()
endif()
set(_rapids_extra_info)
if(_RAPIDS_GLOBAL_TARGETS)
include("${rapids-cmake-dir}/cmake/make_global.cmake")
rapids_cmake_make_global(_RAPIDS_GLOBAL_TARGETS)
list(APPEND _rapids_extra_info "GLOBAL_TARGETS" ${_RAPIDS_GLOBAL_TARGETS})
endif()
if(_RAPIDS_BUILD_EXPORT_SET)
include("${rapids-cmake-dir}/export/cpm.cmake")
rapids_export_cpm(BUILD ${name} ${_RAPIDS_BUILD_EXPORT_SET}
CPM_ARGS NAME ${name} VERSION ${version} ${_RAPIDS_UNPARSED_ARGUMENTS}
${_rapids_extra_info})
endif()
if(_RAPIDS_INSTALL_EXPORT_SET)
include("${rapids-cmake-dir}/export/package.cmake")
if(_RAPIDS_COMPONENTS)
list(APPEND _rapids_extra_info "COMPONENTS" ${_RAPIDS_COMPONENTS})
endif()
rapids_export_package(INSTALL ${name} ${_RAPIDS_INSTALL_EXPORT_SET} VERSION ${version}
${_rapids_extra_info})
endif()
# Propagate up variables that CPMFindPackage provide
set(${name}_SOURCE_DIR "${${name}_SOURCE_DIR}" PARENT_SCOPE)
set(${name}_BINARY_DIR "${${name}_BINARY_DIR}" PARENT_SCOPE)
set(${name}_ADDED "${${name}_ADDED}" PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/spdlog.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_spdlog
-----------------
.. versionadded:: v21.10.00
Allow projects to find or build `spdlog` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of spdlog :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_spdlog( [FMT_OPTION <fmt-option-name>]
[BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[<CPM_ARGS> ...])
``FMT_OPTION``
.. versionadded:: v23.04.00
Spdlog depends on the fmt library and offers multiple ways of handling this dependency when spdlog is built. This
option only controls the behavior when spdlog is fetched and built, NOT when an installed spdlog is found on the
system.
  This option can be set to one of: `BUNDLED`, `EXTERNAL_FMT`, `EXTERNAL_FMT_HO`, or `STD_FORMAT`.
  - `BUNDLED`: spdlog uses its own bundled version of fmt.
  - `EXTERNAL_FMT`: spdlog uses the `fmt::fmt` target and is linked with the fmt library.
  - `EXTERNAL_FMT_HO`: spdlog uses the `fmt::fmt-header-only` target and is linked with a
    header-only fmt library.
  - `STD_FORMAT`: spdlog uses `std::format` instead of the fmt library.
  Defaults to `EXTERNAL_FMT_HO`.
.. |PKG_NAME| replace:: spdlog
.. include:: common_package_args.txt
Result Targets
^^^^^^^^^^^^^^
spdlog::spdlog, spdlog::spdlog_header_only targets will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`spdlog_SOURCE_DIR` is set to the path to the source directory of spdlog.
:cmake:variable:`spdlog_BINARY_DIR` is set to the path to the build directory of spdlog.
:cmake:variable:`spdlog_ADDED` is set to a true value if spdlog has not been added before.
:cmake:variable:`spdlog_VERSION` is set to the version of spdlog specified by the versions.json.
:cmake:variable:`spdlog_fmt_target` is set to the fmt target used, if used
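Example
^^^^^^^
A sketch of requesting spdlog with a header-only external fmt; the export set
and target names are illustrative:
.. code-block:: cmake
  rapids_cpm_spdlog(FMT_OPTION EXTERNAL_FMT_HO
                    BUILD_EXPORT_SET example-exports
                    INSTALL_EXPORT_SET example-exports)
  target_link_libraries(example_lib PRIVATE spdlog::spdlog_header_only ${spdlog_fmt_target})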
#]=======================================================================]
function(rapids_cpm_spdlog)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.spdlog")
set(options)
set(one_value FMT_OPTION BUILD_EXPORT_SET INSTALL_EXPORT_SET)
set(multi_value)
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
  # Fix up _RAPIDS_UNPARSED_ARGUMENTS to have EXPORT_SETS as these are needed by rapids_cpm_find.
  # Also propagate the user provided build and install export sets.
if(_RAPIDS_INSTALL_EXPORT_SET)
list(APPEND _RAPIDS_UNPARSED_ARGUMENTS INSTALL_EXPORT_SET ${_RAPIDS_INSTALL_EXPORT_SET})
endif()
if(_RAPIDS_BUILD_EXPORT_SET)
list(APPEND _RAPIDS_UNPARSED_ARGUMENTS BUILD_EXPORT_SET ${_RAPIDS_BUILD_EXPORT_SET})
endif()
set(to_install OFF)
if(_RAPIDS_INSTALL_EXPORT_SET)
set(to_install ON)
endif()
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(spdlog version repository tag shallow exclude)
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(spdlog ${version} patch_command)
# If the option wasn't passed to the command, default to header only fmt
if(NOT _RAPIDS_FMT_OPTION)
set(_RAPIDS_FMT_OPTION "EXTERNAL_FMT_HO")
endif()
if(_RAPIDS_FMT_OPTION STREQUAL "BUNDLED")
set(spdlog_fmt_option "")
elseif(_RAPIDS_FMT_OPTION STREQUAL "EXTERNAL_FMT")
set(spdlog_fmt_option "SPDLOG_FMT_EXTERNAL ON")
set(spdlog_fmt_target fmt::fmt)
elseif(_RAPIDS_FMT_OPTION STREQUAL "EXTERNAL_FMT_HO")
set(spdlog_fmt_option "SPDLOG_FMT_EXTERNAL_HO ON")
set(spdlog_fmt_target fmt::fmt-header-only)
elseif(_RAPIDS_FMT_OPTION STREQUAL "STD_FORMAT")
set(spdlog_fmt_option "SPDLOG_USE_STD_FORMAT ON")
else()
message(FATAL_ERROR "Invalid option used for FMT_OPTION, got: ${_RAPIDS_FMT_OPTION}, expected one of: 'BUNDLED', 'EXTERNAL_FMT', 'EXTERNAL_FMT_HO', 'STD_FORMAT'"
)
endif()
if(_RAPIDS_FMT_OPTION STREQUAL "EXTERNAL_FMT" OR _RAPIDS_FMT_OPTION STREQUAL "EXTERNAL_FMT_HO")
include("${rapids-cmake-dir}/cpm/fmt.cmake")
    # When `spdlog_ROOT` is set, internal find calls in `spdlog-config.cmake` should first
    # search beside spdlog before looking globally, so allow fmt to be found there as well.
list(APPEND fmt_ROOT ${spdlog_ROOT})
rapids_cpm_fmt(${_RAPIDS_UNPARSED_ARGUMENTS})
endif()
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(spdlog ${version} ${_RAPIDS_UNPARSED_ARGUMENTS}
GLOBAL_TARGETS spdlog::spdlog spdlog::spdlog_header_only
CPM_ARGS
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${exclude}
OPTIONS "SPDLOG_INSTALL ${to_install}" "${spdlog_fmt_option}")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(spdlog)
# Propagate up variables that CPMFindPackage provide
set(spdlog_SOURCE_DIR "${spdlog_SOURCE_DIR}" PARENT_SCOPE)
set(spdlog_BINARY_DIR "${spdlog_BINARY_DIR}" PARENT_SCOPE)
set(spdlog_ADDED "${spdlog_ADDED}" PARENT_SCOPE)
set(spdlog_VERSION ${version} PARENT_SCOPE)
set(spdlog_fmt_target ${spdlog_fmt_target} PARENT_SCOPE)
# spdlog creates the correct namespace aliases
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/rmm.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_rmm
--------------
.. versionadded:: v21.10.00
Allow projects to find or build `RMM` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of RMM :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_rmm( [BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[<CPM_ARGS> ...])
.. |PKG_NAME| replace:: rmm
.. include:: common_package_args.txt
Result Targets
^^^^^^^^^^^^^^
rmm::rmm target will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`rmm_SOURCE_DIR` is set to the path to the source directory of RMM.
:cmake:variable:`rmm_BINARY_DIR` is set to the path to the build directory of RMM.
:cmake:variable:`rmm_ADDED` is set to a true value if RMM has not been added before.
:cmake:variable:`rmm_VERSION` is set to the version of RMM specified by the versions.json.
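Example
^^^^^^^
A sketch of typical usage; the export set and target names are illustrative:
.. code-block:: cmake
  rapids_cpm_rmm(BUILD_EXPORT_SET example-exports
                 INSTALL_EXPORT_SET example-exports)
  target_link_libraries(example_lib PRIVATE rmm::rmm)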
#]=======================================================================]
function(rapids_cpm_rmm)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.rmm")
set(options)
set(one_value INSTALL_EXPORT_SET)
set(multi_value)
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
  # Fix up _RAPIDS_UNPARSED_ARGUMENTS to have EXPORT_SETS as these are needed by rapids_cpm_find
if(_RAPIDS_INSTALL_EXPORT_SET)
list(APPEND _RAPIDS_UNPARSED_ARGUMENTS INSTALL_EXPORT_SET ${_RAPIDS_INSTALL_EXPORT_SET})
endif()
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(rmm version repository tag shallow exclude)
set(to_exclude OFF)
if(NOT _RAPIDS_INSTALL_EXPORT_SET OR exclude)
set(to_exclude ON)
endif()
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(rmm ${version} patch_command)
include("${rapids-cmake-dir}/cpm/find.cmake")
  rapids_cpm_find(rmm ${version} ${_RAPIDS_UNPARSED_ARGUMENTS}
GLOBAL_TARGETS rmm::rmm
CPM_ARGS
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${to_exclude}
OPTIONS "BUILD_TESTS OFF" "BUILD_BENCHMARKS OFF")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(rmm)
# Propagate up variables that CPMFindPackage provide
set(rmm_SOURCE_DIR "${rmm_SOURCE_DIR}" PARENT_SCOPE)
set(rmm_BINARY_DIR "${rmm_BINARY_DIR}" PARENT_SCOPE)
set(rmm_ADDED "${rmm_ADDED}" PARENT_SCOPE)
set(rmm_VERSION ${version} PARENT_SCOPE)
# rmm creates the correct namespace aliases
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/init.cmake | #=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_init
---------------
.. versionadded:: v21.06.00
Establish the `CPM` and preset package infrastructure for the project.
.. code-block:: cmake
rapids_cpm_init( [OVERRIDE <json_override_file_path> ] )
The CPM module will be downloaded based on the state of :cmake:variable:`CPM_SOURCE_CACHE` and
:cmake:variable:`ENV{CPM_SOURCE_CACHE}`. This allows multiple nested projects to share the
same download of CPM. If those variables aren't set, the file will be cached
in the build tree of the calling project.
.. versionadded:: v21.10.00
``OVERRIDE``
Override the `CPM` preset package information for the project. The user provided
json file must follow the `versions.json` format, which is :ref:`documented here<cpm_version_format>`.
If the override file doesn't specify a value or package entry the default
version will be used.
.. note::
Must be called before any invocation of :cmake:command:`rapids_cpm_find`.
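Example
^^^^^^^
A minimal sketch of a project's top level CMakeLists.txt; the override file
name is illustrative:
.. code-block:: cmake
  include(rapids-cpm)
  rapids_cpm_init(OVERRIDE "${CMAKE_CURRENT_SOURCE_DIR}/cmake/versions.json")
  # rapids_cpm_find and rapids_cpm_* calls may now be made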
#]=======================================================================]
function(rapids_cpm_init)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.init")
set(_rapids_options)
set(_rapids_one_value OVERRIDE)
set(_rapids_multi_value)
cmake_parse_arguments(_RAPIDS "${_rapids_options}" "${_rapids_one_value}"
"${_rapids_multi_value}" ${ARGN})
include("${rapids-cmake-dir}/cpm/detail/load_preset_versions.cmake")
rapids_cpm_load_preset_versions()
if(_RAPIDS_OVERRIDE)
include("${rapids-cmake-dir}/cpm/package_override.cmake")
rapids_cpm_package_override("${_RAPIDS_OVERRIDE}")
endif()
include("${rapids-cmake-dir}/cpm/detail/download.cmake")
rapids_cpm_download()
# Propagate up any modified local variables that CPM has changed.
#
  # Push up the modified CMAKE_MODULE_PATH to allow `find_package` calls to find packages that CPM
# already added.
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/gtest.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_gtest
----------------
.. versionadded:: v21.10.00
Allow projects to find or build `Google Test` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of GTest :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_gtest( [BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[<CPM_ARGS> ...])
.. |PKG_NAME| replace:: GTest
.. include:: common_package_args.txt
Result Targets
^^^^^^^^^^^^^^
GTest::gtest, GTest::gmock, GTest::gtest_main, GTest::gmock_main targets will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`GTest_SOURCE_DIR` is set to the path to the source directory of GTest.
:cmake:variable:`GTest_BINARY_DIR` is set to the path to the build directory of GTest.
:cmake:variable:`GTest_ADDED` is set to a true value if GTest has not been added before.
:cmake:variable:`GTest_VERSION` is set to the version of GTest specified by the versions.json.
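Example
^^^^^^^
A sketch of a typical test setup; the target names are illustrative:
.. code-block:: cmake
  enable_testing()
  rapids_cpm_gtest(BUILD_EXPORT_SET example-testing-exports)
  add_executable(example_tests test.cpp)
  target_link_libraries(example_tests PRIVATE GTest::gtest_main)
  add_test(NAME example_tests COMMAND example_tests)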
#]=======================================================================]
function(rapids_cpm_gtest)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.gtest")
set(to_install OFF)
if(INSTALL_EXPORT_SET IN_LIST ARGN)
set(to_install ON)
endif()
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(GTest version repository tag shallow exclude)
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(GTest ${version} patch_command)
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(GTest ${version} ${ARGN}
GLOBAL_TARGETS GTest::gtest GTest::gmock GTest::gtest_main GTest::gmock_main
CPM_ARGS FIND_PACKAGE_ARGUMENTS "EXACT"
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${exclude}
OPTIONS "INSTALL_GTEST ${to_install}" "CMAKE_POSITION_INDEPENDENT_CODE ON")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(GTest)
# Propagate up variables that CPMFindPackage provide
set(GTest_SOURCE_DIR "${GTest_SOURCE_DIR}" PARENT_SCOPE)
set(GTest_BINARY_DIR "${GTest_BINARY_DIR}" PARENT_SCOPE)
set(GTest_ADDED "${GTest_ADDED}" PARENT_SCOPE)
set(GTest_VERSION ${version} PARENT_SCOPE)
if(TARGET GTest::gtest AND NOT TARGET GTest::gmock)
message(WARNING "The GTest package found doesn't provide gmock. If you run into 'GTest::gmock target not found' issues you need to use a different version of GTest.The easiest way is to request building GTest from source by adding the following to the cmake invocation:
'-DCPM_DOWNLOAD_GTest=ON'")
endif()
if(NOT TARGET GTest::gtest AND TARGET gtest)
add_library(GTest::gtest ALIAS gtest)
add_library(GTest::gtest_main ALIAS gtest_main)
endif()
if(NOT TARGET GTest::gmock AND TARGET gmock)
add_library(GTest::gmock ALIAS gmock)
add_library(GTest::gmock_main ALIAS gmock_main)
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/libcudacxx.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_libcudacxx
---------------------
.. versionadded:: v21.12.00
Allow projects to find or build `libcudacxx` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of libcudacxx :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_libcudacxx( [BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[<CPM_ARGS> ...])
.. |PKG_NAME| replace:: libcudacxx
.. include:: common_package_args.txt
Result Targets
^^^^^^^^^^^^^^
libcudacxx::libcudacxx target will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`libcudacxx_SOURCE_DIR` is set to the path to the source directory of libcudacxx.
:cmake:variable:`libcudacxx_BINARY_DIR` is set to the path to the build directory of libcudacxx.
:cmake:variable:`libcudacxx_ADDED` is set to a true value if libcudacxx has not been added before.
:cmake:variable:`libcudacxx_VERSION` is set to the version of libcudacxx specified by the versions.json.
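Example
^^^^^^^
A sketch of typical usage; the export set and target names are illustrative:
.. code-block:: cmake
  rapids_cpm_libcudacxx(BUILD_EXPORT_SET example-exports
                        INSTALL_EXPORT_SET example-exports)
  target_link_libraries(example_lib INTERFACE libcudacxx::libcudacxx)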
#]=======================================================================]
function(rapids_cpm_libcudacxx)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.libcudacxx")
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(libcudacxx version repository tag shallow exclude)
set(to_install OFF)
if(INSTALL_EXPORT_SET IN_LIST ARGN AND NOT exclude)
set(to_install ON)
    # By default, if we allow libcudacxx to install into `CMAKE_INSTALL_INCLUDEDIR` alongside rmm
    # (or other packages) we will get an install tree that looks like this:
    # include/rmm include/cub include/libcudacxx
    # This is a problem for CMake+NVCC due to the rules around imported targets and user/system
    # includes. In this case both rmm and libcudacxx will specify an include path of `include`,
    # but libcudacxx tries to mark it as a user include while rmm uses CMake's default of a
    # system include. When a compiler is given the same include path as both user and system, it
    # always treats it as system.
    # While rmm could also mark `include` as system, that just pushes the issue to another
    # dependency which isn't built by RAPIDS and marks `include` as system.
    # Instead, the more reliable option is to ensure that libcudacxx is placed in a unique
    # include path that the other project will use. In the case of rapids-cmake we install the
    # headers to `include/rapids/libcudacxx`.
include(GNUInstallDirs)
set(CMAKE_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/rapids/libcudacxx")
set(CMAKE_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/rapids/")
endif()
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(libcudacxx ${version} patch_command)
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(libcudacxx ${version} ${ARGN}
GLOBAL_TARGETS libcudacxx::libcudacxx
CPM_ARGS
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${exclude}
OPTIONS "libcudacxx_ENABLE_INSTALL_RULES ${to_install}")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(libcudacxx)
set(options)
set(one_value BUILD_EXPORT_SET INSTALL_EXPORT_SET)
set(multi_value)
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
if(libcudacxx_SOURCE_DIR)
# Store where CMake can find our custom libcudacxx
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(BUILD libcudacxx "${libcudacxx_SOURCE_DIR}/lib/cmake"
EXPORT_SET ${_RAPIDS_BUILD_EXPORT_SET})
rapids_export_find_package_root(INSTALL libcudacxx
[=[${CMAKE_CURRENT_LIST_DIR}/../../rapids/cmake/libcudacxx]=]
EXPORT_SET ${_RAPIDS_INSTALL_EXPORT_SET} CONDITION to_install)
endif()
# Propagate up variables that CPMFindPackage provide
set(libcudacxx_SOURCE_DIR "${libcudacxx_SOURCE_DIR}" PARENT_SCOPE)
set(libcudacxx_BINARY_DIR "${libcudacxx_BINARY_DIR}" PARENT_SCOPE)
set(libcudacxx_ADDED "${libcudacxx_ADDED}" PARENT_SCOPE)
set(libcudacxx_VERSION ${version} PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/gbench.cmake | #=============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_gbench
-----------------
.. versionadded:: v22.12.00
Allow projects to find or build Google Benchmark via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of Google benchmark :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_gbench( [BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[BUILD_STATIC]
[<CPM_ARGS> ...])
.. |PKG_NAME| replace:: benchmark
.. include:: common_package_args.txt
.. versionadded:: v23.12.00
``BUILD_STATIC``
Will build Google Benchmark statically. No local searching for a previously
built version will occur.
Result Targets
^^^^^^^^^^^^^^
benchmark::benchmark targets will be created
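Example
^^^^^^^
A sketch of a statically linked benchmark executable; the target names are
illustrative:
.. code-block:: cmake
  rapids_cpm_gbench(BUILD_STATIC)
  add_executable(example_bench bench.cpp)
  target_link_libraries(example_bench PRIVATE benchmark::benchmark)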
#]=======================================================================]
function(rapids_cpm_gbench)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.gbench")
set(to_install OFF)
if(INSTALL_EXPORT_SET IN_LIST ARGN)
set(to_install ON)
endif()
set(build_shared ON)
if(BUILD_STATIC IN_LIST ARGN)
set(build_shared OFF)
set(CPM_DOWNLOAD_benchmark ON) # Since we need static we build from source
endif()
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(benchmark version repository tag shallow exclude)
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(benchmark ${version} patch_command)
include("${rapids-cmake-dir}/cmake/install_lib_dir.cmake")
rapids_cmake_install_lib_dir(lib_dir)
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(benchmark ${version} ${ARGN}
GLOBAL_TARGETS benchmark::benchmark benchmark::benchmark_main
CPM_ARGS
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${exclude}
OPTIONS "BENCHMARK_ENABLE_GTEST_TESTS OFF" "BENCHMARK_ENABLE_TESTING OFF"
"BENCHMARK_ENABLE_INSTALL ${to_install}"
"CMAKE_INSTALL_LIBDIR ${lib_dir}" "BUILD_SHARED_LIBS ${build_shared}")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(benchmark)
if(NOT TARGET benchmark::benchmark AND TARGET benchmark)
add_library(benchmark::benchmark ALIAS benchmark)
endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/package_override.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_package_override
---------------------------
.. versionadded:: v21.10.00
Overrides the :cmake:command:`rapids_cpm_find`, `rapids_cpm_* <../api.html#cpm-pre-configured-packages>`__,
`CPM <https://github.com/cpm-cmake/CPM.cmake>`_, and :cmake:module:`FetchContent() <cmake:module:FetchContent>` package information for the project.
.. code-block:: cmake
rapids_cpm_package_override(<json_file_path>)
Allows projects to override the default values for any :cmake:command:`rapids_cpm_find`,
`rapids_cpm_* <../api.html#cpm-pre-configured-packages>`__, `CPM <https://github.com/cpm-cmake/CPM.cmake>`_, and :cmake:module:`FetchContent() <cmake:module:FetchContent>` package.
The user provided json file must follow the `versions.json` format,
which is :ref:`documented here<cpm_version_format>` and shown in the below
example:
.. literalinclude:: /packages/example.json
:language: json
By default when an override for a project is provided no local search
for that project will occur. This is done to make sure that the requested modified
version is used.
If a project is listed in multiple override files, the first file's values will be used,
and all later overrides for that package will be ignored. This "first to record, wins"
approach is used to match FetchContent, and allows parent projects to override child
projects.
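A typical invocation from a project's CMakeLists.txt looks like the following
sketch; the override file name is illustrative:
.. code-block:: cmake
  rapids_cpm_init()
  rapids_cpm_package_override("${CMAKE_CURRENT_SOURCE_DIR}/cmake/versions.json")
  # subsequent rapids_cpm_* calls will use the overridden entries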
.. note::
.. versionadded:: v23.10.00
When the variable `CPM_<package_name>_SOURCE` exists, any override entries
for `package_name` will be ignored.
.. note::
If the override file doesn't specify a value or package entry the default
version will be used.
Must be called before any invocation of :cmake:command:`rapids_cpm_find`.
#]=======================================================================]
function(rapids_cpm_package_override filepath)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.rapids_cpm_package_override")
if(NOT EXISTS "${filepath}")
message(FATAL_ERROR "rapids_cpm_package_override can't load '${filepath}', verify it exists")
endif()
file(READ "${filepath}" json_data)
# Determine all the projects that exist in the json file
string(JSON package_count LENGTH "${json_data}" packages)
math(EXPR package_count "${package_count} - 1")
# For each project cache the subset of the json for that project in a global property so that
  # package_details.cmake can fetch that information
if(package_count GREATER_EQUAL 0)
# cmake-lint: disable=E1120
foreach(index RANGE ${package_count})
string(JSON package_name MEMBER "${json_data}" packages ${index})
get_property(override_exists GLOBAL PROPERTY rapids_cpm_${package_name}_override_json DEFINED)
if(NOT (override_exists OR DEFINED CPM_${package_name}_SOURCE))
# only add the first override for a project we encounter
string(JSON data GET "${json_data}" packages "${package_name}")
set_property(GLOBAL PROPERTY rapids_cpm_${package_name}_override_json "${data}")
set_property(GLOBAL PROPERTY rapids_cpm_${package_name}_override_json_file "${filepath}")
        # establish the fetch content entry so the override also applies when
        # the package is brought in via FetchContent
        include(FetchContent)
        include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
        rapids_cpm_package_details(${package_name} version repository tag shallow exclude)
        include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
        rapids_cpm_generate_patch_command(${package_name} ${version} patch_command)
        FetchContent_Declare(${package_name}
                             GIT_REPOSITORY ${repository}
                             GIT_TAG ${tag}
                             GIT_SHALLOW ${shallow}
                             PATCH_COMMAND ${patch_command}
                             EXCLUDE_FROM_ALL ${exclude})
      endif()
    endforeach()
  endif()
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/fmt.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_fmt
-----------------
.. versionadded:: v23.04.00
Allow projects to find or build `fmt` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of fmt :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_fmt( [BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[<CPM_ARGS> ...])
.. |PKG_NAME| replace:: fmt
.. include:: common_package_args.txt
Result Targets
^^^^^^^^^^^^^^
fmt::fmt, fmt::fmt-header-only targets will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`fmt_SOURCE_DIR` is set to the path to the source directory of fmt.
:cmake:variable:`fmt_BINARY_DIR` is set to the path to the build directory of fmt.
:cmake:variable:`fmt_ADDED` is set to a true value if fmt has not been added before.
:cmake:variable:`fmt_VERSION` is set to the version of fmt specified by the versions.json.
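Example usage (the export set and target names are illustrative):

.. code-block:: cmake

  rapids_cpm_init()
  rapids_cpm_fmt(BUILD_EXPORT_SET my-exports INSTALL_EXPORT_SET my-exports)
  target_link_libraries(my_lib PRIVATE fmt::fmt)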
#]=======================================================================]
function(rapids_cpm_fmt)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.fmt")
set(to_install OFF)
if(INSTALL_EXPORT_SET IN_LIST ARGN)
set(to_install ON)
endif()
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(fmt version repository tag shallow exclude)
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(fmt ${version} patch_command)
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(fmt ${version} ${ARGN}
GLOBAL_TARGETS fmt::fmt fmt::fmt-header-only
CPM_ARGS
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${exclude}
OPTIONS "FMT_INSTALL ${to_install}" "CMAKE_POSITION_INDEPENDENT_CODE ON")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(fmt)
# Propagate up variables that CPMFindPackage provide
set(fmt_SOURCE_DIR "${fmt_SOURCE_DIR}" PARENT_SCOPE)
set(fmt_BINARY_DIR "${fmt_BINARY_DIR}" PARENT_SCOPE)
set(fmt_ADDED "${fmt_ADDED}" PARENT_SCOPE)
set(fmt_VERSION ${version} PARENT_SCOPE)
# fmt creates the correct namespace aliases
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/cuco.cmake | #=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_cuco
---------------------
.. versionadded:: v22.08.00
Allow projects to find or build `cuCollections` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of cuCollections :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_cuco( [BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[<CPM_ARGS> ...])
.. |PKG_NAME| replace:: cuco
.. include:: common_package_args.txt
Result Targets
^^^^^^^^^^^^^^
cuco::cuco target will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`cuco_SOURCE_DIR` is set to the path to the source directory of cuco.
:cmake:variable:`cuco_BINARY_DIR` is set to the path to the build directory of cuco.
:cmake:variable:`cuco_ADDED` is set to a true value if cuco has not been added before.
:cmake:variable:`cuco_VERSION` is set to the version of cuco specified by the versions.json.
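Example usage (the export set and target names are illustrative):

.. code-block:: cmake

  rapids_cpm_init()
  rapids_cpm_cuco(INSTALL_EXPORT_SET my-exports)
  target_link_libraries(my_lib PRIVATE cuco::cuco)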
#]=======================================================================]
function(rapids_cpm_cuco)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.cuco")
set(options)
set(one_value INSTALL_EXPORT_SET)
set(multi_value)
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
# Fix up _RAPIDS_UNPARSED_ARGUMENTS to have INSTALL_EXPORT_SET as this is needed for rapids_cpm_find
set(to_install OFF)
if(_RAPIDS_INSTALL_EXPORT_SET)
list(APPEND _RAPIDS_UNPARSED_ARGUMENTS INSTALL_EXPORT_SET ${_RAPIDS_INSTALL_EXPORT_SET})
set(to_install ON)
endif()
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(cuco version repository tag shallow exclude)
set(to_exclude OFF)
if(NOT to_install OR exclude)
set(to_exclude ON)
endif()
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(cuco ${version} patch_command)
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(cuco ${version} ${_RAPIDS_UNPARSED_ARGUMENTS}
GLOBAL_TARGETS cuco::cuco
CPM_ARGS
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${to_exclude}
OPTIONS "BUILD_TESTS OFF" "BUILD_BENCHMARKS OFF" "BUILD_EXAMPLES OFF"
"INSTALL_CUCO ${to_install}")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(cuco)
# Propagate up variables that CPMFindPackage provide
set(cuco_SOURCE_DIR "${cuco_SOURCE_DIR}" PARENT_SCOPE)
set(cuco_BINARY_DIR "${cuco_BINARY_DIR}" PARENT_SCOPE)
set(cuco_ADDED "${cuco_ADDED}" PARENT_SCOPE)
set(cuco_VERSION ${version} PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/thrust.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_thrust
-----------------
.. versionadded:: v21.10.00
Allow projects to find or build `Thrust` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of Thrust :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_thrust( NAMESPACE <namespace>
[BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[<CPM_ARGS> ...])
``NAMESPACE``
The namespace that the Thrust target will be constructed into.
.. |PKG_NAME| replace:: Thrust
.. include:: common_package_args.txt
.. versionadded:: v23.12.00
When `BUILD_EXPORT_SET` is specified, the generated build export set dependency
file will automatically call `thrust_create_target(<namespace>::Thrust FROM_OPTIONS)`.
When `INSTALL_EXPORT_SET` is specified, the generated install export set dependency
file will automatically call `thrust_create_target(<namespace>::Thrust FROM_OPTIONS)`.
Result Targets
^^^^^^^^^^^^^^
<namespace>::Thrust target will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`Thrust_SOURCE_DIR` is set to the path to the source directory of Thrust.
:cmake:variable:`Thrust_BINARY_DIR` is set to the path to the build directory of Thrust.
:cmake:variable:`Thrust_ADDED` is set to a true value if Thrust has not been added before.
:cmake:variable:`Thrust_VERSION` is set to the version of Thrust specified by the versions.json.
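Example usage (``my_proj`` and the export set name are illustrative):

.. code-block:: cmake

  rapids_cpm_init()
  rapids_cpm_thrust(NAMESPACE my_proj INSTALL_EXPORT_SET my-exports)
  target_link_libraries(my_lib PRIVATE my_proj::Thrust)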
#]=======================================================================]
# cmake-lint: disable=R0915
function(rapids_cpm_thrust NAMESPACE namespaces_name)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.thrust")
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(Thrust version repository tag shallow exclude)
set(to_install OFF)
if(INSTALL_EXPORT_SET IN_LIST ARGN AND NOT exclude)
set(to_install ON)
# Make sure we install thrust into the `include/rapids` subdirectory instead of the default
include(GNUInstallDirs)
set(CMAKE_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/rapids")
set(CMAKE_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/rapids")
endif()
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(Thrust ${version} patch_command)
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(Thrust ${version} ${ARGN}
GLOBAL_TARGETS ${namespaces_name}::Thrust
CPM_ARGS FIND_PACKAGE_ARGUMENTS EXACT
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${exclude}
OPTIONS "THRUST_ENABLE_INSTALL_RULES ${to_install}")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(Thrust)
set(options)
set(one_value BUILD_EXPORT_SET INSTALL_EXPORT_SET)
set(multi_value)
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
set(post_find_code "if(NOT TARGET ${namespaces_name}::Thrust)"
" thrust_create_target(${namespaces_name}::Thrust FROM_OPTIONS)" "endif()")
if(Thrust_SOURCE_DIR)
# Store where CMake can find the Thrust-config.cmake that comes part of Thrust source code
include("${rapids-cmake-dir}/export/find_package_root.cmake")
include("${rapids-cmake-dir}/export/detail/post_find_package_code.cmake")
rapids_export_find_package_root(BUILD Thrust "${Thrust_SOURCE_DIR}/cmake"
EXPORT_SET ${_RAPIDS_BUILD_EXPORT_SET})
rapids_export_post_find_package_code(BUILD Thrust "${post_find_code}" EXPORT_SET
${_RAPIDS_BUILD_EXPORT_SET})
rapids_export_find_package_root(INSTALL Thrust
[=[${CMAKE_CURRENT_LIST_DIR}/../../rapids/cmake/thrust]=]
EXPORT_SET ${_RAPIDS_INSTALL_EXPORT_SET} CONDITION to_install)
rapids_export_post_find_package_code(INSTALL Thrust "${post_find_code}" EXPORT_SET
${_RAPIDS_INSTALL_EXPORT_SET} CONDITION to_install)
endif()
# Check for the existence of thrust_create_target so we support fetching Thrust with DOWNLOAD_ONLY
if(NOT TARGET ${namespaces_name}::Thrust AND COMMAND thrust_create_target)
thrust_create_target(${namespaces_name}::Thrust FROM_OPTIONS)
set_target_properties(${namespaces_name}::Thrust PROPERTIES IMPORTED_NO_SYSTEM ON)
if(TARGET _Thrust_Thrust)
set_target_properties(_Thrust_Thrust PROPERTIES IMPORTED_NO_SYSTEM ON)
endif()
endif()
# Propagate up variables that CPMFindPackage provide
set(Thrust_SOURCE_DIR "${Thrust_SOURCE_DIR}" PARENT_SCOPE)
set(Thrust_BINARY_DIR "${Thrust_BINARY_DIR}" PARENT_SCOPE)
set(Thrust_ADDED "${Thrust_ADDED}" PARENT_SCOPE)
set(Thrust_VERSION ${version} PARENT_SCOPE)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/versions.json | {
"packages" : {
"benchmark" : {
"version" : "1.8.0",
"git_url" : "https://github.com/google/benchmark.git",
"git_tag" : "v${version}"
},
"cuco" : {
"version" : "0.0.1",
"git_shallow" : false,
"git_url" : "https://github.com/NVIDIA/cuCollections.git",
"git_tag" : "7c76a124df0c2cd3fd66e3e080b9470a3b4707c6"
},
"fmt" : {
"version" : "9.1.0",
"git_url" : "https://github.com/fmtlib/fmt.git",
"git_tag" : "${version}",
"patches" : [
{
"file" : "fmt/no_debug_warnings.diff",
"issue" : "No warnings during debug builds [https://github.com/fmtlib/fmt/issues/3351]",
"fixed_in" : "10.0"
}
]
},
"GTest" : {
"version" : "1.13.0",
"git_url" : "https://github.com/google/googletest.git",
"git_tag" : "v${version}"
},
"libcudacxx" : {
"version" : "2.1.0",
"git_url" : "https://github.com/NVIDIA/libcudacxx.git",
"git_tag" : "${version}",
"patches" : [
{
"file" : "libcudacxx/install_rules.diff",
"issue" : "libcudacxx installs incorrect files [https://github.com/NVIDIA/libcudacxx/pull/428]",
"fixed_in" : "2.2"
},
{
"file" : "libcudacxx/reroot_support.diff",
"issue" : "Support conda-forge usage of CMake rerooting [https://github.com/NVIDIA/libcudacxx/pull/490], requires libcudacxx/install_rules.diff.",
"fixed_in" : "2.2"
},
{
"file" : "libcudacxx/proclaim_return_type_nv_exec_check_disable.diff",
"issue" : "Use pragma to disable execution checks in cuda::proclaim_return_type. [https://github.com/NVIDIA/libcudacxx/pull/448]",
"fixed_in" : "2.2"
},
{
"file" : "libcudacxx/memory_resource.diff",
"issue" : "Allow {async_}resource_ref to be constructible from a pointer. [https://github.com/NVIDIA/libcudacxx/pull/439]",
"fixed_in" : "2.2"
}
]
},
"nvbench" : {
"version" : "0.0",
"git_shallow" : false,
"git_url" : "https://github.com/NVIDIA/nvbench.git",
"git_tag" : "978d81a0cba97e3f30508e3c0e3cd65ce94fb699"
},
"nvcomp" : {
"version" : "3.0.4",
"git_url" : "https://github.com/NVIDIA/nvcomp.git",
"git_tag" : "v2.2.0",
"proprietary_binary" : {
"x86_64-linux" : "https://developer.download.nvidia.com/compute/nvcomp/${version}/local_installers/nvcomp_${version}_x86_64_${cuda-toolkit-version-major}.x.tgz",
"aarch64-linux" : "https://developer.download.nvidia.com/compute/nvcomp/${version}/local_installers/nvcomp_${version}_SBSA_${cuda-toolkit-version-major}.x.tgz"
}
},
"rmm" : {
"version" : "${rapids-cmake-version}",
"git_url" : "https://github.com/rapidsai/rmm.git",
"git_tag" : "branch-${version}"
},
"spdlog" : {
"version" : "1.11.0",
"git_url" : "https://github.com/gabime/spdlog.git",
"git_tag" : "v${version}"
},
"Thrust" : {
"version" : "1.17.2",
"git_url" : "https://github.com/NVIDIA/thrust.git",
"git_tag" : "${version}",
"patches" : [
{
"file" : "Thrust/reroot_support.diff",
"issue" : "Support conda-forge usage of CMake rerooting [https://github.com/NVIDIA/thrust/pull/1969]",
"fixed_in" : "2.2"
},
{
"file" : "Thrust/transform_iter_with_reduce_by_key.diff",
"issue" : "Support transform iterator with reduce by key [https://github.com/NVIDIA/thrust/pull/1805]",
"fixed_in" : "2.1"
},
{
"file" : "Thrust/install_rules.diff",
"issue" : "Thrust 1.X installs incorrect files [https://github.com/NVIDIA/thrust/issues/1790]",
"fixed_in" : "2.0"
}
]
}
}
}
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/nvcomp.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_nvcomp
-----------------
.. versionadded:: v22.06.00
Allow projects to find or build `nvComp` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of nvComp :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_nvcomp( [USE_PROPRIETARY_BINARY <ON|OFF>]
[BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[<CPM_ARGS> ...])
``USE_PROPRIETARY_BINARY``
By enabling this flag and using the software, you agree to fully comply with the terms and conditions of
nvcomp's NVIDIA Software License Agreement, found at https://developer.download.nvidia.com/compute/nvcomp/2.3/LICENSE.txt.
NVComp offers a pre-built proprietary version of the library (currently for x86_64 and aarch64 Linux only) that offers
more features compared to the open source version. Since NVComp doesn't offer pre-built versions for all platforms,
callers should verify that the request for a proprietary binary was fulfilled by checking the
:cmake:variable:`nvcomp_proprietary_binary` variable after calling :cmake:command:`rapids_cpm_nvcomp`.
.. note::
If an override entry exists for the nvcomp package it MUST have a proprietary_binary entry for this
flag to do anything. Any override without this entry is considered to invalidate the existing
proprietary binary entry.
.. |PKG_NAME| replace:: nvcomp
.. include:: common_package_args.txt
Result Targets
^^^^^^^^^^^^^^
nvcomp::nvcomp target will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`nvcomp_SOURCE_DIR` is set to the path to the source directory of nvcomp.
:cmake:variable:`nvcomp_BINARY_DIR` is set to the path to the build directory of nvcomp.
:cmake:variable:`nvcomp_ADDED` is set to a true value if nvcomp has not been added before.
:cmake:variable:`nvcomp_VERSION` is set to the version of nvcomp specified by the versions.json.
:cmake:variable:`nvcomp_proprietary_binary` is set to ON if the proprietary binary is being used
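Example usage, including the recommended check of
:cmake:variable:`nvcomp_proprietary_binary` (the export set name is illustrative):

.. code-block:: cmake

  rapids_cpm_init()
  rapids_cpm_nvcomp(USE_PROPRIETARY_BINARY ON INSTALL_EXPORT_SET my-exports)
  if(NOT nvcomp_proprietary_binary)
    message(STATUS "nvcomp proprietary binary not available; built from source")
  endif()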
#]=======================================================================]
# cmake-lint: disable=R0915,R0912
function(rapids_cpm_nvcomp)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.nvcomp")
set(options)
set(one_value USE_PROPRIETARY_BINARY BUILD_EXPORT_SET INSTALL_EXPORT_SET)
set(multi_value)
cmake_parse_arguments(_RAPIDS "${options}" "${one_value}" "${multi_value}" ${ARGN})
# Fix up _RAPIDS_UNPARSED_ARGUMENTS to have EXPORT_SETS as this is needed for rapids_cpm_find
if(_RAPIDS_INSTALL_EXPORT_SET)
list(APPEND _RAPIDS_EXPORT_ARGUMENTS INSTALL_EXPORT_SET ${_RAPIDS_INSTALL_EXPORT_SET})
endif()
if(_RAPIDS_BUILD_EXPORT_SET)
list(APPEND _RAPIDS_EXPORT_ARGUMENTS BUILD_EXPORT_SET ${_RAPIDS_BUILD_EXPORT_SET})
endif()
set(_RAPIDS_UNPARSED_ARGUMENTS ${_RAPIDS_EXPORT_ARGUMENTS})
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(nvcomp version repository tag shallow exclude)
set(to_exclude OFF)
if(NOT _RAPIDS_INSTALL_EXPORT_SET OR exclude)
set(to_exclude ON)
endif()
# first search locally if `rapids_cmake_always_download` is false
if(NOT rapids_cmake_always_download)
include("${rapids-cmake-dir}/find/package.cmake")
rapids_find_package(nvcomp ${version} GLOBAL_TARGETS nvcomp::nvcomp ${_RAPIDS_EXPORT_ARGUMENTS}
FIND_ARGS QUIET)
if(nvcomp_FOUND)
# report where nvcomp was found
message(STATUS "Found nvcomp: ${nvcomp_DIR} (found version ${nvcomp_VERSION})")
endif()
endif()
# second, see if we have a proprietary pre-built binary listed in versions.json and if it was
# requested.
set(nvcomp_proprietary_binary OFF) # will be set to true by rapids_cpm_get_proprietary_binary
if(_RAPIDS_USE_PROPRIETARY_BINARY AND NOT nvcomp_FOUND)
include("${rapids-cmake-dir}/cpm/detail/get_proprietary_binary_url.cmake")
include("${rapids-cmake-dir}/cpm/detail/download_proprietary_binary.cmake")
rapids_cpm_get_proprietary_binary_url(nvcomp ${version} nvcomp_url)
if(nvcomp_url)
rapids_cpm_download_proprietary_binary(nvcomp ${nvcomp_url})
endif()
# Record the nvcomp_DIR so that if USE_PROPRIETARY_BINARY is disabled we can safely clear the
# nvcomp_DIR value
if(nvcomp_proprietary_binary)
set(nvcomp_proprietary_binary_dir "${nvcomp_ROOT}/lib/cmake/nvcomp")
cmake_path(NORMAL_PATH nvcomp_proprietary_binary_dir)
set(rapids_cpm_nvcomp_proprietary_binary_dir "${nvcomp_proprietary_binary_dir}"
CACHE INTERNAL "nvcomp proprietary location")
endif()
elseif(DEFINED nvcomp_DIR)
cmake_path(NORMAL_PATH nvcomp_DIR)
if(nvcomp_DIR STREQUAL rapids_cpm_nvcomp_proprietary_binary_dir)
unset(nvcomp_DIR)
unset(nvcomp_DIR CACHE)
endif()
endif()
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(nvcomp ${version} patch_command)
# Apply any patch commands to the proprietary binary
if(nvcomp_proprietary_binary AND patch_command)
execute_process(COMMAND ${patch_command} WORKING_DIRECTORY ${nvcomp_ROOT})
endif()
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(nvcomp ${version} ${_RAPIDS_UNPARSED_ARGUMENTS}
GLOBAL_TARGETS nvcomp::nvcomp
CPM_ARGS
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
EXCLUDE_FROM_ALL ${to_exclude}
PATCH_COMMAND ${patch_command}
OPTIONS "BUILD_STATIC ON" "BUILD_TESTS OFF" "BUILD_BENCHMARKS OFF"
"BUILD_EXAMPLES OFF")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(nvcomp)
# provide consistent targets between a found nvcomp and one building from source
if(NOT TARGET nvcomp::nvcomp AND TARGET nvcomp)
add_library(nvcomp::nvcomp ALIAS nvcomp)
endif()
# Propagate up variables that CPMFindPackage provide
set(nvcomp_SOURCE_DIR "${nvcomp_SOURCE_DIR}" PARENT_SCOPE)
set(nvcomp_BINARY_DIR "${nvcomp_BINARY_DIR}" PARENT_SCOPE)
set(nvcomp_ADDED "${nvcomp_ADDED}" PARENT_SCOPE)
set(nvcomp_VERSION ${version} PARENT_SCOPE)
set(nvcomp_proprietary_binary ${nvcomp_proprietary_binary} PARENT_SCOPE)
# Set up install rules when using the proprietary_binary. When building from source, nvcomp
# will set the correct install rules
include("${rapids-cmake-dir}/export/find_package_root.cmake")
if(NOT to_exclude AND nvcomp_proprietary_binary)
include(GNUInstallDirs)
install(DIRECTORY "${nvcomp_ROOT}/lib/" DESTINATION lib)
install(DIRECTORY "${nvcomp_ROOT}/include/" DESTINATION include)
# place the license information in the location that conda uses
install(FILES "${nvcomp_ROOT}/NOTICE" DESTINATION info/ RENAME NVCOMP_NOTICE)
install(FILES "${nvcomp_ROOT}/LICENSE" DESTINATION info/ RENAME NVCOMP_LICENSE)
endif()
# point our consumers to where they can find the pre-built version
rapids_export_find_package_root(BUILD nvcomp "${nvcomp_ROOT}"
EXPORT_SET ${_RAPIDS_BUILD_EXPORT_SET}
CONDITION nvcomp_proprietary_binary)
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/nvbench.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)
#[=======================================================================[.rst:
rapids_cpm_nvbench
------------------
.. versionadded:: v21.10.00
Allow projects to find or build `nvbench` via `CPM` with built-in
tracking of these dependencies for correct export support.
Uses the version of nvbench :ref:`specified in the version file <cpm_versions>` for consistency
across all RAPIDS projects.
.. code-block:: cmake
rapids_cpm_nvbench( [BUILD_EXPORT_SET <export-name>]
[INSTALL_EXPORT_SET <export-name>]
[BUILD_STATIC]
[<CPM_ARGS> ...])
.. |PKG_NAME| replace:: nvbench
.. include:: common_package_args.txt
.. versionadded:: v23.12.00
``BUILD_STATIC``
Will build nvbench statically. No local searching for a previously
built version will occur.
Result Targets
^^^^^^^^^^^^^^
nvbench::nvbench target will be created
nvbench::main target will be created
Result Variables
^^^^^^^^^^^^^^^^
:cmake:variable:`nvbench_SOURCE_DIR` is set to the path to the source directory of nvbench.
:cmake:variable:`nvbench_BINARY_DIR` is set to the path to the build directory of nvbench.
:cmake:variable:`nvbench_ADDED` is set to a true value if nvbench has not been added before.
:cmake:variable:`nvbench_VERSION` is set to the version of nvbench specified by the versions.json.
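Example usage (the export set and target names are illustrative):

.. code-block:: cmake

  rapids_cpm_init()
  rapids_cpm_nvbench(BUILD_STATIC BUILD_EXPORT_SET my-exports)
  target_link_libraries(my_bench PRIVATE nvbench::main)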
#]=======================================================================]
function(rapids_cpm_nvbench)
list(APPEND CMAKE_MESSAGE_CONTEXT "rapids.cpm.nvbench")
set(to_install OFF)
if(INSTALL_EXPORT_SET IN_LIST ARGN)
set(to_install ON)
endif()
set(build_shared ON)
if(BUILD_STATIC IN_LIST ARGN)
set(build_shared OFF)
set(CPM_DOWNLOAD_nvbench ON) # Since we need static we build from source
endif()
include("${rapids-cmake-dir}/cpm/detail/package_details.cmake")
rapids_cpm_package_details(nvbench version repository tag shallow exclude)
# CUDA::nvml is an optional package and might not be installed (e.g. in conda environments)
find_package(CUDAToolkit REQUIRED)
set(nvbench_with_nvml "OFF")
if(TARGET CUDA::nvml)
set(nvbench_with_nvml "ON")
endif()
include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
rapids_cpm_generate_patch_command(nvbench ${version} patch_command)
include("${rapids-cmake-dir}/cpm/find.cmake")
rapids_cpm_find(nvbench ${version} ${ARGN}
GLOBAL_TARGETS nvbench::nvbench nvbench::main
CPM_ARGS
GIT_REPOSITORY ${repository}
GIT_TAG ${tag}
GIT_SHALLOW ${shallow}
PATCH_COMMAND ${patch_command}
EXCLUDE_FROM_ALL ${exclude}
OPTIONS "NVBench_ENABLE_NVML ${nvbench_with_nvml}" "NVBench_ENABLE_EXAMPLES OFF"
"NVBench_ENABLE_TESTING OFF" "NVBench_ENABLE_INSTALL_RULES ${to_install}"
"BUILD_SHARED_LIBS ${build_shared}")
include("${rapids-cmake-dir}/cpm/detail/display_patch_status.cmake")
rapids_cpm_display_patch_status(nvbench)
# Propagate up variables that CPMFindPackage provide
set(nvbench_SOURCE_DIR "${nvbench_SOURCE_DIR}" PARENT_SCOPE)
set(nvbench_BINARY_DIR "${nvbench_BINARY_DIR}" PARENT_SCOPE)
set(nvbench_ADDED "${nvbench_ADDED}" PARENT_SCOPE)
set(nvbench_VERSION ${version} PARENT_SCOPE)
# nvbench creates the correct namespace aliases
endfunction()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches/command_template.cmake.in | #=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
set(msg_state)
function(rapids_cpm_run_git_patch file issue)
set(git_command @GIT_EXECUTABLE@)
cmake_path(GET file FILENAME file_name)
cmake_path(GET file_name EXTENSION LAST_ONLY ext)
string(SUBSTRING "${ext}" 1 -1 ext)
if(NOT (ext STREQUAL "diff" OR ext STREQUAL "patch") )
list(APPEND msg_state "rapids-cmake: Unable to apply ${file} as ${ext} is unsupported. Only .diff and .patch are supported")
set(msg_state ${msg_state} PARENT_SCOPE)
return()
endif()
set(result 1)
if(ext STREQUAL "diff")
execute_process(
COMMAND ${git_command} apply ${file}
RESULT_VARIABLE result
ERROR_VARIABLE repo_error_info
)
if(NOT result EQUAL 0)
# See if the diff was previously applied
execute_process(
COMMAND ${git_command} apply --reverse --check ${file}
RESULT_VARIABLE result
)
endif()
elseif(ext STREQUAL "patch")
# no need to check if the git patch was already applied
# `am` does that and returns a success code in those cases
execute_process(
COMMAND ${git_command} am -3 ${file}
RESULT_VARIABLE result
ERROR_VARIABLE repo_error_info
)
endif()
if(result EQUAL 0)
list(APPEND msg_state "rapids-cmake [@package_name@]: applied ${ext} ${file_name} to fix issue: '${issue}'")
else()
list(APPEND msg_state "rapids-cmake [@package_name@]: failed to apply ${ext} ${file_name}")
list(APPEND msg_state "rapids-cmake [@package_name@]: git ${ext} output: ${repo_error_info}")
endif()
list(APPEND msg_state "\n")
set(msg_state ${msg_state} PARENT_SCOPE)
endfunction()
# We want to ensure that any patched files have a timestamp
# that is at least 1 second newer compared to the git checkout
# This ensures that all of CMake up-to-date install logic
# considers these files as modified.
#
# This ensures that if our patch contains additional install rules
# they will execute even when an existing install rule exists
# with the same destination ( and our patch is listed last ).
execute_process(COMMAND ${CMAKE_COMMAND} -E sleep 1)
set(files "@patch_files_to_run@")
set(issues "@patch_issues_to_ref@")
set(output_file "@log_file@")
foreach(file issue IN ZIP_LISTS files issues)
rapids_cpm_run_git_patch(${file} ${issue})
endforeach()
if(msg_state)
file(WRITE "${output_file}" ${msg_state})
endif()
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches/Thrust/transform_iter_with_reduce_by_key.diff | diff --git a/thrust/iterator/transform_input_output_iterator.h b/thrust/iterator/transform_input_output_iterator.h
index f512a36..a5f725d 100644
--- a/thrust/iterator/transform_input_output_iterator.h
+++ b/thrust/iterator/transform_input_output_iterator.h
@@ -102,6 +102,8 @@ template <typename InputFunction, typename OutputFunction, typename Iterator>
/*! \endcond
*/
+ transform_input_output_iterator() = default;
+
/*! This constructor takes as argument a \c Iterator an \c InputFunction and an
* \c OutputFunction and copies them to a new \p transform_input_output_iterator
*
diff --git a/thrust/iterator/transform_output_iterator.h b/thrust/iterator/transform_output_iterator.h
index 66fb46a..4a68cb5 100644
--- a/thrust/iterator/transform_output_iterator.h
+++ b/thrust/iterator/transform_output_iterator.h
@@ -104,6 +104,8 @@ template <typename UnaryFunction, typename OutputIterator>
/*! \endcond
*/
+ transform_output_iterator() = default;
+
/*! This constructor takes as argument an \c OutputIterator and an \c
* UnaryFunction and copies them to a new \p transform_output_iterator
*
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches/Thrust/reroot_support.diff | diff --git a/dependencies/cub/cub/cmake/cub-header-search.cmake b/dependencies/cub/cub/cmake/cub-header-search.cmake
index 2ff1a8acd8..5e731f2be8 100644
--- a/dependencies/cub/cub/cmake/cub-header-search.cmake
+++ b/dependencies/cub/cub/cmake/cub-header-search.cmake
@@ -1,5 +1,6 @@
unset(_CUB_VERSION_INCLUDE_DIR CACHE) # Clear old result to force search
find_path(_CUB_VERSION_INCLUDE_DIR cub/version.cuh
+ NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH # Only search explicit paths below:
PATHS
"${CMAKE_CURRENT_LIST_DIR}/../.." # Source tree
diff --git a/dependencies/cub/cub/cmake/cub-header-search.cmake.in b/dependencies/cub/cub/cmake/cub-header-search.cmake.in
index 271b1b27bd..3bd10e4b70 100644
--- a/dependencies/cub/cub/cmake/cub-header-search.cmake.in
+++ b/dependencies/cub/cub/cmake/cub-header-search.cmake.in
@@ -11,6 +11,7 @@ list(TRANSFORM from_install_prefix REPLACE ".+" "../")
list(JOIN from_install_prefix "" from_install_prefix)
find_path(_CUB_VERSION_INCLUDE_DIR cub/version.cuh
+ NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH # Only search explicit paths below:
PATHS
"${CMAKE_CURRENT_LIST_DIR}/${from_install_prefix}/@CMAKE_INSTALL_INCLUDEDIR@"
diff --git a/thrust/cmake/thrust-header-search.cmake b/thrust/cmake/thrust-header-search.cmake
index 643ec90b7..7d27c68f4 100644
--- a/thrust/cmake/thrust-header-search.cmake
+++ b/thrust/cmake/thrust-header-search.cmake
@@ -1,6 +1,7 @@
# Parse version information from version.h:
unset(_THRUST_VERSION_INCLUDE_DIR CACHE) # Clear old result to force search
find_path(_THRUST_VERSION_INCLUDE_DIR thrust/version.h
+ NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH # Only search explicit paths below:
PATHS
"${CMAKE_CURRENT_LIST_DIR}/../.." # Source tree
diff --git a/thrust/cmake/thrust-header-search.cmake.in b/thrust/cmake/thrust-header-search.cmake.in
index c014c469b..adea07e2d 100644
--- a/thrust/cmake/thrust-header-search.cmake.in
+++ b/thrust/cmake/thrust-header-search.cmake.in
@@ -11,6 +11,7 @@ list(TRANSFORM from_install_prefix REPLACE ".+" "../")
list(JOIN from_install_prefix "" from_install_prefix)
find_path(_THRUST_VERSION_INCLUDE_DIR thrust/version.h
+ NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH # Only search explicit paths below:
PATHS
"${CMAKE_CURRENT_LIST_DIR}/${from_install_prefix}/@CMAKE_INSTALL_INCLUDEDIR@"
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches/Thrust/install_rules.diff | diff --git a/cmake/ThrustInstallRules.cmake b/cmake/ThrustInstallRules.cmake
index 93084c1..bf6c195 100644
--- a/cmake/ThrustInstallRules.cmake
+++ b/cmake/ThrustInstallRules.cmake
@@ -13,7 +13,7 @@ install(DIRECTORY "${Thrust_SOURCE_DIR}/thrust"
install(DIRECTORY "${Thrust_SOURCE_DIR}/thrust/cmake/"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/thrust"
- PATTERN thrust-header-search EXCLUDE
+ REGEX thrust-header-search.* EXCLUDE
)
# Need to configure a file to store the infix specified in
# CMAKE_INSTALL_INCLUDEDIR since it can be defined by the user
@@ -39,7 +39,7 @@ if (THRUST_INSTALL_CUB_HEADERS)
# Need to configure a file to store THRUST_INSTALL_HEADER_INFIX
install(DIRECTORY "${Thrust_SOURCE_DIR}/dependencies/cub/cub/cmake/"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/cub"
- PATTERN cub-header-search EXCLUDE
+ REGEX cub-header-search.* EXCLUDE
)
set(install_location "${CMAKE_INSTALL_LIBDIR}/cmake/cub")
configure_file("${Thrust_SOURCE_DIR}/dependencies/cub/cub/cmake/cub-header-search.cmake.in"
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches/libcudacxx/proclaim_return_type_nv_exec_check_disable.diff | diff --git a/include/cuda/std/detail/libcxx/include/__functional/invoke.h b/include/cuda/std/detail/libcxx/include/__functional/invoke.h
index 1ab318d5..850d00a8 100644
--- a/include/cuda/std/detail/libcxx/include/__functional/invoke.h
+++ b/include/cuda/std/detail/libcxx/include/__functional/invoke.h
@@ -342,6 +342,9 @@ _LIBCUDACXX_INLINE_VISIBILITY __nat __invoke(__any, _Args&& ...__args);
// bullets 1, 2 and 3
+#ifdef __CUDACC__
+#pragma nv_exec_check_disable
+#endif
template <class _Fp, class _A0, class ..._Args,
class = __enable_if_bullet1<_Fp, _A0> >
inline _LIBCUDACXX_INLINE_VISIBILITY
@@ -350,6 +353,9 @@ __invoke(_Fp&& __f, _A0&& __a0, _Args&& ...__args)
_NOEXCEPT_(noexcept((static_cast<_A0&&>(__a0).*__f)(static_cast<_Args&&>(__args)...)))
{ return (static_cast<_A0&&>(__a0).*__f)(static_cast<_Args&&>(__args)...); }
+#ifdef __CUDACC__
+#pragma nv_exec_check_disable
+#endif
template <class _Fp, class _A0, class ..._Args,
class = __enable_if_bullet2<_Fp, _A0> >
inline _LIBCUDACXX_INLINE_VISIBILITY
@@ -358,6 +364,9 @@ __invoke(_Fp&& __f, _A0&& __a0, _Args&& ...__args)
_NOEXCEPT_(noexcept((__a0.get().*__f)(static_cast<_Args&&>(__args)...)))
{ return (__a0.get().*__f)(static_cast<_Args&&>(__args)...); }
+#ifdef __CUDACC__
+#pragma nv_exec_check_disable
+#endif
template <class _Fp, class _A0, class ..._Args,
class = __enable_if_bullet3<_Fp, _A0> >
inline _LIBCUDACXX_INLINE_VISIBILITY
@@ -368,6 +377,9 @@ __invoke(_Fp&& __f, _A0&& __a0, _Args&& ...__args)
// bullets 4, 5 and 6
+#ifdef __CUDACC__
+#pragma nv_exec_check_disable
+#endif
template <class _Fp, class _A0,
class = __enable_if_bullet4<_Fp, _A0> >
inline _LIBCUDACXX_INLINE_VISIBILITY
@@ -376,6 +388,9 @@ __invoke(_Fp&& __f, _A0&& __a0)
_NOEXCEPT_(noexcept(static_cast<_A0&&>(__a0).*__f))
{ return static_cast<_A0&&>(__a0).*__f; }
+#ifdef __CUDACC__
+#pragma nv_exec_check_disable
+#endif
template <class _Fp, class _A0,
class = __enable_if_bullet5<_Fp, _A0> >
inline _LIBCUDACXX_INLINE_VISIBILITY
@@ -384,6 +399,9 @@ __invoke(_Fp&& __f, _A0&& __a0)
_NOEXCEPT_(noexcept(__a0.get().*__f))
{ return __a0.get().*__f; }
+#ifdef __CUDACC__
+#pragma nv_exec_check_disable
+#endif
template <class _Fp, class _A0,
class = __enable_if_bullet6<_Fp, _A0> >
inline _LIBCUDACXX_INLINE_VISIBILITY
@@ -394,6 +412,9 @@ __invoke(_Fp&& __f, _A0&& __a0)
// bullet 7
+#ifdef __CUDACC__
+#pragma nv_exec_check_disable
+#endif
template <class _Fp, class ..._Args>
inline _LIBCUDACXX_INLINE_VISIBILITY
_LIBCUDACXX_CONSTEXPR decltype(_CUDA_VSTD::declval<_Fp>()(_CUDA_VSTD::declval<_Args>()...))
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches/libcudacxx/memory_resource.diff | diff --git a/include/cuda/memory_resource b/include/cuda/memory_resource
index 4a904cda..32f3f210 100644
--- a/include/cuda/memory_resource
+++ b/include/cuda/memory_resource
@@ -525,7 +525,16 @@ public:
&& (((_Alloc_type == _AllocType::_Default) && resource_with<_Resource, _Properties...>) //
||((_Alloc_type == _AllocType::_Async) && async_resource_with<_Resource, _Properties...>)))) //
basic_resource_ref(_Resource& __res) noexcept
- : _Resource_ref_base<_Alloc_type>(&__res, &__alloc_vtable<_Alloc_type, _Resource>)
+ : _Resource_ref_base<_Alloc_type>(_CUDA_VSTD::addressof(__res), &__alloc_vtable<_Alloc_type, _Resource>)
+ , _Filtered_vtable<_Properties...>(_Filtered_vtable<_Properties...>::template _Create<_Resource>())
+ {}
+
+ _LIBCUDACXX_TEMPLATE(class _Resource)
+ (requires (!_Is_basic_resource_ref<_Resource>
+ && (((_Alloc_type == _AllocType::_Default) && resource_with<_Resource, _Properties...>) //
+ ||((_Alloc_type == _AllocType::_Async) && async_resource_with<_Resource, _Properties...>)))) //
+ basic_resource_ref(_Resource* __res) noexcept
+ : _Resource_ref_base<_Alloc_type>(__res, &__alloc_vtable<_Alloc_type, _Resource>)
, _Filtered_vtable<_Properties...>(_Filtered_vtable<_Properties...>::template _Create<_Resource>())
{}
| 0 |
rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches | rapidsai_public_repos/rapids-cmake/rapids-cmake/cpm/patches/libcudacxx/reroot_support.diff | diff --git a/lib/cmake/libcudacxx/libcudacxx-header-search.cmake.in b/lib/cmake/libcudacxx/libcudacxx-header-search.cmake.in
index 6130197f..ec53d5de 100644
--- a/lib/cmake/libcudacxx/libcudacxx-header-search.cmake.in
+++ b/lib/cmake/libcudacxx/libcudacxx-header-search.cmake.in
@@ -5,6 +5,7 @@ unset(_libcudacxx_VERSION_INCLUDE_DIR CACHE) # Clear old result to force search
set(from_install_prefix "@from_install_prefix@")
find_path(_libcudacxx_VERSION_INCLUDE_DIR cuda/std/detail/__config
+ NO_CMAKE_FIND_ROOT_PATH # Don't allow CMake to re-root the search
NO_DEFAULT_PATH # Only search explicit paths below:
PATHS
"${CMAKE_CURRENT_LIST_DIR}/${from_install_prefix}/@CMAKE_INSTALL_INCLUDEDIR@" # Install tree
| 0 |