repo_id | file_path | content | __index_level_0__
---|---|---|---|
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/renovate.json | {
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:base"]
}
| 0 |
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/pyproject.toml | [build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "rapids-dependency-file-generator"
dynamic = [
"version",
]
authors = [
{ name = "RAPIDS Development Team", email = "pypi@rapids.ai" }
]
urls = { homepage = "https://github.com/rapidsai/dependency-file-generator" }
description = "Tool for generating RAPIDS environment files"
readme = { file = "README.md", content-type = "text/markdown" }
license = { file = "LICENSE" }
classifiers = [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
]
requires-python = ">=3.8"
dependencies = [
"PyYAML",
"jsonschema",
"tomlkit",
]
[project.scripts]
rapids-dependency-file-generator = "rapids_dependency_file_generator.cli:main"
[tool.setuptools]
packages = { "find" = { where = ["src"] } }
[tool.setuptools.dynamic]
version = {attr = "rapids_dependency_file_generator._version.__version__"}
[tool.isort]
profile = "black"
| 0 |
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/.flake8 | [flake8]
max-line-length = 120
select = E,F,W
ignore = E123,E126,E203,E226,E241,E704,W503,W504
| 0 |
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/.releaserc.yaml | branches:
- main
plugins:
- "@semantic-release/commit-analyzer"
- "@semantic-release/release-notes-generator"
- - "@semantic-release/github"
- addReleases: top
- - "@semantic-release/exec"
- verifyReleaseCmd: ./ci/update-versions.sh ${nextRelease.version} && ./ci/build-test.sh
publishCmd: ./ci/pypi-publish.sh
- - "@semantic-release/git"
- assets:
- src/rapids_dependency_file_generator/_version.py
- src/rapids_dependency_file_generator/schema.json
- package.json
| 0 |
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/README.md | # rapids-dependency-file-generator
`rapids-dependency-file-generator` is a Python CLI tool that generates conda `environment.yaml` files and `requirements.txt` files from a single YAML file, typically named `dependencies.yaml`.
When installed, it provides the `rapids-dependency-file-generator` CLI command, which parses a `dependencies.yaml` configuration file and generates the appropriate conda `environment.yaml` and `requirements.txt` dependency files.
## Table of Contents
- [Installation](#installation)
- [Usage](#usage)
- [`dependencies.yaml` Format](#dependenciesyaml-format)
- [`files` key](#files-key)
- [`channels` key](#channels-key)
- [`dependencies` key](#dependencies-key)
- [How Dependency Lists Are Merged](#how-dependency-lists-are-merged)
- [Additional CLI Notes](#additional-cli-notes)
- [Examples](#examples)
## Installation
`rapids-dependency-file-generator` is available on [PyPI](https://pypi.org/project/rapids-dependency-file-generator/). To install, run:
```sh
pip install rapids-dependency-file-generator
```
## Usage
When `rapids-dependency-file-generator` is invoked, it will read a `dependencies.yaml` file from the current directory and generate the child dependency files it describes.
The `dependencies.yaml` file has the following characteristics:
- it is intended to be committed to the root directory of repositories
- it can define matrices that enable the output dependency files to vary according to any arbitrary specification (or combination of specifications), including CUDA version, machine architecture, Python version, etc.
- it contains bifurcated lists of dependencies based on the dependency's purpose (i.e. build, runtime, test, etc.). The bifurcated dependency lists are merged according to the description in the [_How Dependency Lists Are Merged_](#how-dependency-lists-are-merged) section below.
## `dependencies.yaml` Format
> The [Examples](#examples) section below has instructions on where example `dependencies.yaml` files and their corresponding output can be viewed.
The `dependencies.yaml` file has three relevant top-level keys: `files`, `channels`, and `dependencies`. These keys are described in detail below.
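For orientation, here is a minimal sketch of how the three keys fit together (the file entry name, dependency list name, and package below are hypothetical placeholders):
```yaml
files:
  all: # a file entry; see the `files` key section below
    output: conda
    includes: [build]
channels: # conda channels for generated environment.yaml files
  - rapidsai
  - conda-forge
dependencies:
  build: # a dependency list referenced by `includes` above
    common:
      - output_types: conda
        packages:
          - some_hypothetical_dep
```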
### `files` Key
The top-level `files` key is responsible for determining the following:
- which types of dependency files should be generated (i.e. conda `environment.yaml` files and/or `requirements.txt` files)
- where the generated files should be written to (relative to the `dependencies.yaml` file)
- which variant files should be generated (based on the provided matrix)
- which of the dependency lists from the top-level `dependencies` key should be included in the generated files
Here is an example of what the `files` key might look like:
```yaml
files:
all: # used as the prefix for the generated dependency file names for conda or requirements files (has no effect on pyproject.toml files)
output: [conda, requirements] # which dependency file types to generate. required, can be "conda", "requirements", "pyproject", "none" or a list of non-"none" values
conda_dir: conda/environments # where to put conda environment.yaml files. optional, defaults to "conda/environments"
requirements_dir: python/cudf # where to put requirements.txt files. optional, but recommended. defaults to "python"
pyproject_dir: python/cudf # where to put pyproject.toml files. optional, but recommended. defaults to "python"
    matrix: # (optional) contains an arbitrary set of key/value pairs that determine which dependency files should be generated. These values are included in the output filename.
cuda: ["11.5", "11.6"] # which CUDA version variant files to generate.
arch: [x86_64] # which architecture version variant files to generate. This value should be the result of running the `arch` command on a given machine.
includes: # a list of keys from the `dependencies` section which should be included in the generated files
- build
- test
- runtime
build: # multiple `files` children keys can be specified
output: requirements
conda_dir: conda/environments
requirements_dir: python/cudf
matrix:
cuda: ["11.5"]
arch: [x86_64]
py: ["3.8"]
includes:
- build
```
The result of the above configuration is that the following dependency files would be generated:
- `conda/environments/all_cuda-115_arch-x86_64.yaml`
- `conda/environments/all_cuda-116_arch-x86_64.yaml`
- `python/cudf/requirements_all_cuda-115_arch-x86_64.txt`
- `python/cudf/requirements_all_cuda-116_arch-x86_64.txt`
- `python/cudf/requirements_build_cuda-115_arch-x86_64_py-38.txt`
The `all*.yaml` and `requirements_all*.txt` files would include the contents of the `build`, `test`, and `runtime` dependency lists from the top-level `dependencies` key. The `requirements_build*.txt` file would only include the contents of the `build` dependency list from the top-level `dependencies` key.
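For illustration, the generated `conda/environments/all_cuda-115_arch-x86_64.yaml` might look like the following sketch (the package names are hypothetical and the `name` field is an assumption; the real contents come from the referenced dependency lists):
```yaml
name: all_cuda-115_arch-x86_64 # assumption: name mirrors the file stem
channels:
  - rapidsai
  - conda-forge
dependencies:
  - cudatoolkit=11.5 # hypothetical package selected for the cuda=11.5 matrix
  - pytest # hypothetical package from the `test` list
  - some_build_dep # hypothetical package from the `build` list
```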
The value of `output` can also be `none` as shown below.
```yaml
files:
test:
output: none
includes:
- test
```
When `output: none` is used, the `conda_dir`, `requirements_dir` and `matrix` keys can be omitted. The use case for `output: none` is described in the [_Additional CLI Notes_](#additional-cli-notes) section below.
#### `extras`
A given file may include an `extras` entry that provides inputs specific to a particular file type.
Here is an example:
```yaml
files:
build:
output: pyproject
includes: # a list of keys from the `dependencies` section which should be included in the generated files
- build
extras:
table: table_name
key: key_name
```
The currently supported extras, by file type, are listed below (see the sketch after this list):
- pyproject.toml
- table: The table in pyproject.toml where the dependencies should be written. Acceptable values are "build-system", "project", and "project.optional-dependencies".
- key: The key corresponding to the dependency list in `table`. This may only be provided for the "project.optional-dependencies" table since the key name is fixed for "build-system" ("requires") and "project" ("dependencies"). Note that this implicitly prohibits including optional dependencies via an inline table under the "project" table.
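As a sketch of this mapping: if the earlier example used a hypothetical `table` of `"project.optional-dependencies"` and `key` of `"test"`, the included dependency list would be written to a section like the following in the generated `pyproject.toml` (package name hypothetical):
```toml
[project.optional-dependencies]
test = [
    "some_hypothetical_dep",
]
```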
### `channels` Key
The top-level `channels` key specifies the channels that should be included in any generated conda `environment.yaml` files.
It might look like this:
```yaml
channels:
- rapidsai
- conda-forge
```
In the absence of a `channels` key, some sensible defaults for RAPIDS will be used (see [constants.py](./src/rapids_dependency_file_generator/constants.py)).
### `dependencies` Key
The top-level `dependencies` key is where the bifurcated dependency lists should be specified.
Underneath the `dependencies` key are sets of key-value pairs. For each pair, the key can be arbitrarily named, but should match an item from the `includes` list of any `files` entry.
The value of each key-value pair can have the following children keys:
- `common` - contains dependency lists that are the same across all matrix variations
- `specific` - contains dependency lists that are specific to a particular matrix combination
The values of each of these keys are described in detail below.
#### `common` Key
The `common` key contains a list of objects with the following keys:
- `output_types` - a list of output types (e.g. "conda" for `environment.yaml` files or "requirements" for `requirements.txt` files) for the packages in the `packages` key
- `packages` - a list of packages to be included in the generated output file
#### `specific` Key
The `specific` key contains a list of objects with the following keys:
- `output_types` - _same as `output_types` for the `common` key above_
- `matrices` - a list of objects (described below) which define packages that are specific to a particular matrix combination
##### `matrices` Key
Each list item under the `matrices` key contains a `matrix` key and a `packages` key.
The `matrix` key is used to define which matrix combinations from `files.[*].matrix` will use the associated packages.
The `packages` key is a list of packages to be included in the generated output file for a matching matrix.
This is elaborated on in [How Dependency Lists Are Merged](#how-dependency-lists-are-merged).
The structure described above is shown in the example below:
```yaml
dependencies:
build: # dependency list name
common: # dependencies common among all matrix variations
- output_types: [conda, requirements] # the output types this list item should apply to
packages:
- common_build_dep
- output_types: conda
packages:
- cupy
- pip: # supports `pip` key for conda environment.yaml files
- some_random_dep
specific: # dependencies specific to a particular matrix combination
- output_types: conda # dependencies specific to conda environment.yaml files
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
- matrix: # an empty matrix entry serves as a fallback if there are no other matrix matches
packages:
- cudatoolkit
- output_types: [conda, requirements]
matrices:
- matrix: # dependencies specific to x86_64 and 11.5
cuda: "11.5"
arch: x86_64
packages:
- a_random_x86_115_specific_dep
- matrix: # an empty matrix/package entry to prevent error from being thrown for non 11.5 and x86_64 matches
packages:
- output_types: requirements # dependencies specific to requirements.txt files
matrices:
- matrix:
cuda: "11.5"
packages:
- another_random_dep=11.5.0
- matrix:
cuda: "11.6"
packages:
- another_random_dep=11.6.0
test:
common:
- output_types: [conda, requirements]
packages:
- pytest
```
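To make this concrete before the next section: for a `files` entry with `output: conda`, `includes: [build, test]`, and the `cuda: "11.5"` / `arch: x86_64` matrix, the configuration above would yield a dependency list along these lines (a sketch; channels and header comments are omitted, and the placement of the `pip` entry is illustrative):
```yaml
dependencies:
  - a_random_x86_115_specific_dep
  - common_build_dep
  - cudatoolkit=11.5
  - cupy
  - pytest
  - pip:
      - some_random_dep
```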
## How Dependency Lists Are Merged
The information from the top-level `files` and `dependencies` keys is used to determine which dependencies should be included in the final output of the generated dependency files.
Consider the following top-level `files` key configuration:
```yaml
files:
all:
output: conda
conda_dir: conda/environments
requirements_dir: python/cudf
matrix:
cuda: ["11.5", "11.6"]
arch: [x86_64]
includes:
- build
- test
```
In this example, `rapids-dependency-file-generator` will generate two conda environment files: `conda/environments/all_cuda-115_arch-x86_64.yaml` and `conda/environments/all_cuda-116_arch-x86_64.yaml`.
Since the `output` value is `conda`, `rapids-dependency-file-generator` will iterate through any `dependencies.build.common` and `dependencies.test.common` list entries and use the `packages` of any entry whose `output_types` key is `conda` or `[conda, ...]`.
Further, for the `11.5` and `x86_64` matrix combination, any `build.specific` and `test.specific` list items whose output includes `conda` and whose `matrices` list contains an item matching any of the definitions below would also be merged:
```yaml
specific:
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
packages:
- some_dep1
- some_dep2
# or
specific:
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
arch: "x86_64"
packages:
- some_dep1
- some_dep2
# or
specific:
- output_types: conda
matrices:
- matrix:
arch: "x86_64"
packages:
- some_dep1
- some_dep2
```
Every `matrices` list must have a match for a given input matrix (only the first matching matrix in the list of `matrices` will be used).
If no matches are found for a particular matrix combination, an error will be thrown.
In instances where an error should not be thrown, an empty `matrix` and `packages` list item can be used:
```yaml
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
arch: x86_64
py: "3.8"
packages:
- a_very_specific_115_x86_38_dep
- matrix: # an empty matrix entry serves as a fallback if there are no other matrix matches
packages:
```
Merged dependency lists are sorted and deduped.
## Additional CLI Notes
Invoking `rapids-dependency-file-generator` without any arguments is the intended default workflow for RAPIDS developers: it generates all of the necessary dependency files as specified in the top-level `files` configuration.
However, there are CLI arguments that can augment the `files` configuration values before the files are generated.
Consider the example when `output: none` is used:
```yaml
files:
test:
output: none
includes:
- test
```
The `test` dependency list above is useful for CI, but it might not make sense to commit a generated file for it to the repository. In such a scenario, the following CLI arguments can be used:
```sh
ENV_NAME="cudf_test"
rapids-dependency-file-generator \
--file_key "test" \
--output "conda" \
--matrix "cuda=11.5;arch=$(arch)" > env.yaml
mamba env create --file env.yaml --name "$ENV_NAME"
mamba activate "$ENV_NAME"
# install cudf packages built in CI and test them in newly created environment...
```
The `--file_key` argument is passed the `test` key name from the `files` configuration. Additional flags are used to generate a single dependency file. When the CLI is used in this fashion, it will print to `stdout` instead of writing the resulting contents to the filesystem.
The `--file_key`, `--output`, and `--matrix` flags must be used together. `--matrix` may be an empty string if the file that should be generated does not depend on any specific matrix variations.
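For instance, a sketch of generating a matrix-independent `requirements.txt` with an empty `--matrix` value:
```sh
rapids-dependency-file-generator \
  --file_key "test" \
  --output "requirements" \
  --matrix "" > requirements_test.txt
```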
Running `rapids-dependency-file-generator -h` will show the most up-to-date CLI arguments.
## Examples
The [tests/examples](./tests/examples/) directory has example `dependencies.yaml` files along with their corresponding output files.
To create a new `example` test, do the following (a command sketch follows the list):
- Create a new directory with a `dependencies.yaml` file in [tests/examples](tests/examples/)
- Ensure the `output` directories (e.g. `conda_dir`, `requirements_dir`, etc.) are set to write to `output/actual`
- Run `rapids-dependency-file-generator --config tests/examples/<new_folder_name>/dependencies.yaml` to generate the initial output files
- Manually inspect the generated files for correctness
- Copy the contents of `output/actual` to `output/expected`, so it will be committed to the repository and used as a baseline for future changes
- Add the new folder name to [test_examples.py](./tests/test_examples.py)
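A command sketch of the workflow above, assuming a hypothetical new example folder named `my_example`:
```sh
# Generate the initial output files into output/actual
rapids-dependency-file-generator --config tests/examples/my_example/dependencies.yaml
# After manual inspection, copy them to output/expected as the baseline
cp -r tests/examples/my_example/output/actual/. tests/examples/my_example/output/expected/
```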
| 0 |
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/MANIFEST.in | include src/rapids_dependency_file_generator/schema.json
| 0 |
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/package-lock.json | {
"name": "rapids-dependency-file-generator",
"version": "1.2.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "rapids-dependency-file-generator",
"version": "1.2.0",
"license": "Apache-2.0",
"devDependencies": {
"@semantic-release/exec": "^6.0.3",
"@semantic-release/git": "^10.0.1",
"semantic-release": "^20.1.0"
}
},
"node_modules/@babel/code-frame": {
"version": "7.18.6",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz",
"integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==",
"dev": true,
"dependencies": {
"@babel/highlight": "^7.18.6"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-identifier": {
"version": "7.19.1",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz",
"integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/highlight": {
"version": "7.18.6",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz",
"integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==",
"dev": true,
"dependencies": {
"@babel/helper-validator-identifier": "^7.18.6",
"chalk": "^2.0.0",
"js-tokens": "^4.0.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@colors/colors": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz",
"integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==",
"dev": true,
"optional": true,
"engines": {
"node": ">=0.1.90"
}
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
"integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
"dev": true,
"dependencies": {
"@nodelib/fs.stat": "2.0.5",
"run-parallel": "^1.1.9"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.stat": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
"integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
"dev": true,
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.walk": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
"integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
"dev": true,
"dependencies": {
"@nodelib/fs.scandir": "2.1.5",
"fastq": "^1.6.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@octokit/auth-token": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-3.0.3.tgz",
"integrity": "sha512-/aFM2M4HVDBT/jjDBa84sJniv1t9Gm/rLkalaz9htOm+L+8JMj1k9w0CkUdcxNyNxZPlTxKPVko+m1VlM58ZVA==",
"dev": true,
"dependencies": {
"@octokit/types": "^9.0.0"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/@octokit/core": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.0.tgz",
"integrity": "sha512-AgvDRUg3COpR82P7PBdGZF/NNqGmtMq2NiPqeSsDIeCfYFOZ9gddqWNQHnFdEUf+YwOj4aZYmJnlPp7OXmDIDg==",
"dev": true,
"dependencies": {
"@octokit/auth-token": "^3.0.0",
"@octokit/graphql": "^5.0.0",
"@octokit/request": "^6.0.0",
"@octokit/request-error": "^3.0.0",
"@octokit/types": "^9.0.0",
"before-after-hook": "^2.2.0",
"universal-user-agent": "^6.0.0"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/@octokit/endpoint": {
"version": "7.0.5",
"resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-7.0.5.tgz",
"integrity": "sha512-LG4o4HMY1Xoaec87IqQ41TQ+glvIeTKqfjkCEmt5AIwDZJwQeVZFIEYXrYY6yLwK+pAScb9Gj4q+Nz2qSw1roA==",
"dev": true,
"dependencies": {
"@octokit/types": "^9.0.0",
"is-plain-object": "^5.0.0",
"universal-user-agent": "^6.0.0"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/@octokit/graphql": {
"version": "5.0.5",
"resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-5.0.5.tgz",
"integrity": "sha512-Qwfvh3xdqKtIznjX9lz2D458r7dJPP8l6r4GQkIdWQouZwHQK0mVT88uwiU2bdTU2OtT1uOlKpRciUWldpG0yQ==",
"dev": true,
"dependencies": {
"@octokit/request": "^6.0.0",
"@octokit/types": "^9.0.0",
"universal-user-agent": "^6.0.0"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/@octokit/openapi-types": {
"version": "16.0.0",
"resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-16.0.0.tgz",
"integrity": "sha512-JbFWOqTJVLHZSUUoF4FzAZKYtqdxWu9Z5m2QQnOyEa04fOFljvyh7D3GYKbfuaSWisqehImiVIMG4eyJeP5VEA==",
"dev": true
},
"node_modules/@octokit/plugin-paginate-rest": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-6.0.0.tgz",
"integrity": "sha512-Sq5VU1PfT6/JyuXPyt04KZNVsFOSBaYOAq2QRZUwzVlI10KFvcbUo8lR258AAQL1Et60b0WuVik+zOWKLuDZxw==",
"dev": true,
"dependencies": {
"@octokit/types": "^9.0.0"
},
"engines": {
"node": ">= 14"
},
"peerDependencies": {
"@octokit/core": ">=4"
}
},
"node_modules/@octokit/plugin-request-log": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.4.tgz",
"integrity": "sha512-mLUsMkgP7K/cnFEw07kWqXGF5LKrOkD+lhCrKvPHXWDywAwuDUeDwWBpc69XK3pNX0uKiVt8g5z96PJ6z9xCFA==",
"dev": true,
"peerDependencies": {
"@octokit/core": ">=3"
}
},
"node_modules/@octokit/plugin-rest-endpoint-methods": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-7.0.1.tgz",
"integrity": "sha512-pnCaLwZBudK5xCdrR823xHGNgqOzRnJ/mpC/76YPpNP7DybdsJtP7mdOwh+wYZxK5jqeQuhu59ogMI4NRlBUvA==",
"dev": true,
"dependencies": {
"@octokit/types": "^9.0.0",
"deprecation": "^2.3.1"
},
"engines": {
"node": ">= 14"
},
"peerDependencies": {
"@octokit/core": ">=3"
}
},
"node_modules/@octokit/request": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/@octokit/request/-/request-6.2.3.tgz",
"integrity": "sha512-TNAodj5yNzrrZ/VxP+H5HiYaZep0H3GU0O7PaF+fhDrt8FPrnkei9Aal/txsN/1P7V3CPiThG0tIvpPDYUsyAA==",
"dev": true,
"dependencies": {
"@octokit/endpoint": "^7.0.0",
"@octokit/request-error": "^3.0.0",
"@octokit/types": "^9.0.0",
"is-plain-object": "^5.0.0",
"node-fetch": "^2.6.7",
"universal-user-agent": "^6.0.0"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/@octokit/request-error": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-3.0.3.tgz",
"integrity": "sha512-crqw3V5Iy2uOU5Np+8M/YexTlT8zxCfI+qu+LxUB7SZpje4Qmx3mub5DfEKSO8Ylyk0aogi6TYdf6kxzh2BguQ==",
"dev": true,
"dependencies": {
"@octokit/types": "^9.0.0",
"deprecation": "^2.0.0",
"once": "^1.4.0"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/@octokit/rest": {
"version": "19.0.7",
"resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-19.0.7.tgz",
"integrity": "sha512-HRtSfjrWmWVNp2uAkEpQnuGMJsu/+dBr47dRc5QVgsCbnIc1+GFEaoKBWkYG+zjrsHpSqcAElMio+n10c0b5JA==",
"dev": true,
"dependencies": {
"@octokit/core": "^4.1.0",
"@octokit/plugin-paginate-rest": "^6.0.0",
"@octokit/plugin-request-log": "^1.0.4",
"@octokit/plugin-rest-endpoint-methods": "^7.0.0"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/@octokit/types": {
"version": "9.0.0",
"resolved": "https://registry.npmjs.org/@octokit/types/-/types-9.0.0.tgz",
"integrity": "sha512-LUewfj94xCMH2rbD5YJ+6AQ4AVjFYTgpp6rboWM5T7N3IsIF65SBEOVcYMGAEzO/kKNiNaW4LoWtoThOhH06gw==",
"dev": true,
"dependencies": {
"@octokit/openapi-types": "^16.0.0"
}
},
"node_modules/@pnpm/network.ca-file": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz",
"integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==",
"dev": true,
"dependencies": {
"graceful-fs": "4.2.10"
},
"engines": {
"node": ">=12.22.0"
}
},
"node_modules/@pnpm/npm-conf": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-1.0.5.tgz",
"integrity": "sha512-hD8ml183638O3R6/Txrh0L8VzGOrFXgRtRDG4qQC4tONdZ5Z1M+tlUUDUvrjYdmK6G+JTBTeaCLMna11cXzi8A==",
"dev": true,
"dependencies": {
"@pnpm/network.ca-file": "^1.0.1",
"config-chain": "^1.1.11"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@semantic-release/commit-analyzer": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-9.0.2.tgz",
"integrity": "sha512-E+dr6L+xIHZkX4zNMe6Rnwg4YQrWNXK+rNsvwOPpdFppvZO1olE2fIgWhv89TkQErygevbjsZFSIxp+u6w2e5g==",
"dev": true,
"dependencies": {
"conventional-changelog-angular": "^5.0.0",
"conventional-commits-filter": "^2.0.0",
"conventional-commits-parser": "^3.2.3",
"debug": "^4.0.0",
"import-from": "^4.0.0",
"lodash": "^4.17.4",
"micromatch": "^4.0.2"
},
"engines": {
"node": ">=14.17"
},
"peerDependencies": {
"semantic-release": ">=18.0.0-beta.1"
}
},
"node_modules/@semantic-release/error": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-3.0.0.tgz",
"integrity": "sha512-5hiM4Un+tpl4cKw3lV4UgzJj+SmfNIDCLLw0TepzQxz9ZGV5ixnqkzIVF+3tp0ZHgcMKE+VNGHJjEeyFG2dcSw==",
"dev": true,
"engines": {
"node": ">=14.17"
}
},
"node_modules/@semantic-release/exec": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/@semantic-release/exec/-/exec-6.0.3.tgz",
"integrity": "sha512-bxAq8vLOw76aV89vxxICecEa8jfaWwYITw6X74zzlO0mc/Bgieqx9kBRz9z96pHectiTAtsCwsQcUyLYWnp3VQ==",
"dev": true,
"dependencies": {
"@semantic-release/error": "^3.0.0",
"aggregate-error": "^3.0.0",
"debug": "^4.0.0",
"execa": "^5.0.0",
"lodash": "^4.17.4",
"parse-json": "^5.0.0"
},
"engines": {
"node": ">=14.17"
},
"peerDependencies": {
"semantic-release": ">=18.0.0"
}
},
"node_modules/@semantic-release/git": {
"version": "10.0.1",
"resolved": "https://registry.npmjs.org/@semantic-release/git/-/git-10.0.1.tgz",
"integrity": "sha512-eWrx5KguUcU2wUPaO6sfvZI0wPafUKAMNC18aXY4EnNcrZL86dEmpNVnC9uMpGZkmZJ9EfCVJBQx4pV4EMGT1w==",
"dev": true,
"dependencies": {
"@semantic-release/error": "^3.0.0",
"aggregate-error": "^3.0.0",
"debug": "^4.0.0",
"dir-glob": "^3.0.0",
"execa": "^5.0.0",
"lodash": "^4.17.4",
"micromatch": "^4.0.0",
"p-reduce": "^2.0.0"
},
"engines": {
"node": ">=14.17"
},
"peerDependencies": {
"semantic-release": ">=18.0.0"
}
},
"node_modules/@semantic-release/github": {
"version": "8.0.7",
"resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-8.0.7.tgz",
"integrity": "sha512-VtgicRIKGvmTHwm//iqTh/5NGQwsncOMR5vQK9pMT92Aem7dv37JFKKRuulUsAnUOIlO4G8wH3gPiBAA0iW0ww==",
"dev": true,
"dependencies": {
"@octokit/rest": "^19.0.0",
"@semantic-release/error": "^3.0.0",
"aggregate-error": "^3.0.0",
"bottleneck": "^2.18.1",
"debug": "^4.0.0",
"dir-glob": "^3.0.0",
"fs-extra": "^11.0.0",
"globby": "^11.0.0",
"http-proxy-agent": "^5.0.0",
"https-proxy-agent": "^5.0.0",
"issue-parser": "^6.0.0",
"lodash": "^4.17.4",
"mime": "^3.0.0",
"p-filter": "^2.0.0",
"p-retry": "^4.0.0",
"url-join": "^4.0.0"
},
"engines": {
"node": ">=14.17"
},
"peerDependencies": {
"semantic-release": ">=18.0.0-beta.1"
}
},
"node_modules/@semantic-release/npm": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-9.0.2.tgz",
"integrity": "sha512-zgsynF6McdzxPnFet+a4iO9HpAlARXOM5adz7VGVCvj0ne8wtL2ZOQoDV2wZPDmdEotDIbVeJjafhelZjs9j6g==",
"dev": true,
"dependencies": {
"@semantic-release/error": "^3.0.0",
"aggregate-error": "^3.0.0",
"execa": "^5.0.0",
"fs-extra": "^11.0.0",
"lodash": "^4.17.15",
"nerf-dart": "^1.0.0",
"normalize-url": "^6.0.0",
"npm": "^8.3.0",
"rc": "^1.2.8",
"read-pkg": "^5.0.0",
"registry-auth-token": "^5.0.0",
"semver": "^7.1.2",
"tempy": "^1.0.0"
},
"engines": {
"node": ">=16 || ^14.17"
},
"peerDependencies": {
"semantic-release": ">=19.0.0"
}
},
"node_modules/@semantic-release/release-notes-generator": {
"version": "10.0.3",
"resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-10.0.3.tgz",
"integrity": "sha512-k4x4VhIKneOWoBGHkx0qZogNjCldLPRiAjnIpMnlUh6PtaWXp/T+C9U7/TaNDDtgDa5HMbHl4WlREdxHio6/3w==",
"dev": true,
"dependencies": {
"conventional-changelog-angular": "^5.0.0",
"conventional-changelog-writer": "^5.0.0",
"conventional-commits-filter": "^2.0.0",
"conventional-commits-parser": "^3.2.3",
"debug": "^4.0.0",
"get-stream": "^6.0.0",
"import-from": "^4.0.0",
"into-stream": "^6.0.0",
"lodash": "^4.17.4",
"read-pkg-up": "^7.0.0"
},
"engines": {
"node": ">=14.17"
},
"peerDependencies": {
"semantic-release": ">=18.0.0-beta.1"
}
},
"node_modules/@semantic-release/release-notes-generator/node_modules/find-up": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dev": true,
"dependencies": {
"locate-path": "^5.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/@semantic-release/release-notes-generator/node_modules/locate-path": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dev": true,
"dependencies": {
"p-locate": "^4.1.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/@semantic-release/release-notes-generator/node_modules/p-limit": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dev": true,
"dependencies": {
"p-try": "^2.0.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@semantic-release/release-notes-generator/node_modules/p-locate": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dev": true,
"dependencies": {
"p-limit": "^2.2.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/@semantic-release/release-notes-generator/node_modules/path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/@semantic-release/release-notes-generator/node_modules/read-pkg-up": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
"integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
"dev": true,
"dependencies": {
"find-up": "^4.1.0",
"read-pkg": "^5.2.0",
"type-fest": "^0.8.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@semantic-release/release-notes-generator/node_modules/type-fest": {
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
"integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/@tootallnate/once": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz",
"integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==",
"dev": true,
"engines": {
"node": ">= 10"
}
},
"node_modules/@types/minimist": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz",
"integrity": "sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==",
"dev": true
},
"node_modules/@types/normalize-package-data": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz",
"integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==",
"dev": true
},
"node_modules/@types/retry": {
"version": "0.12.0",
"resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
"integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==",
"dev": true
},
"node_modules/agent-base": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
"integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
"dev": true,
"dependencies": {
"debug": "4"
},
"engines": {
"node": ">= 6.0.0"
}
},
"node_modules/aggregate-error": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz",
"integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==",
"dev": true,
"dependencies": {
"clean-stack": "^2.0.0",
"indent-string": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-escapes": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-5.0.0.tgz",
"integrity": "sha512-5GFMVX8HqE/TB+FuBJGuO5XG0WrsA6ptUqoODaT/n9mmUaZFkqnBueB4leqGBCmrUHnCnC4PCZTCd0E7QQ83bA==",
"dev": true,
"dependencies": {
"type-fest": "^1.0.2"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dev": true,
"dependencies": {
"color-convert": "^1.9.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/ansicolors": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz",
"integrity": "sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg==",
"dev": true
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"dev": true
},
"node_modules/argv-formatter": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/argv-formatter/-/argv-formatter-1.0.0.tgz",
"integrity": "sha512-F2+Hkm9xFaRg+GkaNnbwXNDV5O6pnCFEmqyhvfC/Ic5LbgOWjJh3L+mN/s91rxVL3znE7DYVpW0GJFT+4YBgWw==",
"dev": true
},
"node_modules/array-ify": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/array-ify/-/array-ify-1.0.0.tgz",
"integrity": "sha512-c5AMf34bKdvPhQ7tBGhqkgKNUzMr4WUs+WDtC2ZUGOUncbxKMTvqxYctiseW3+L4bA8ec+GcZ6/A/FW4m8ukng==",
"dev": true
},
"node_modules/array-union": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
"integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/arrify": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
"integrity": "sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true
},
"node_modules/before-after-hook": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz",
"integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==",
"dev": true
},
"node_modules/bottleneck": {
"version": "2.19.5",
"resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz",
"integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==",
"dev": true
},
"node_modules/brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"node_modules/braces": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
"integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
"dev": true,
"dependencies": {
"fill-range": "^7.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/camelcase": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/camelcase-keys": {
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz",
"integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==",
"dev": true,
"dependencies": {
"camelcase": "^5.3.1",
"map-obj": "^4.0.0",
"quick-lru": "^4.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/cardinal": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/cardinal/-/cardinal-2.1.1.tgz",
"integrity": "sha512-JSr5eOgoEymtYHBjNWyjrMqet9Am2miJhlfKNdqLp6zoeAh0KN5dRAcxlecj5mAJrmQomgiOBj35xHLrFjqBpw==",
"dev": true,
"dependencies": {
"ansicolors": "~0.3.2",
"redeyed": "~2.1.0"
},
"bin": {
"cdl": "bin/cdl.js"
}
},
"node_modules/chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dev": true,
"dependencies": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/clean-stack": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
"integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/cli-table3": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz",
"integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==",
"dev": true,
"dependencies": {
"string-width": "^4.2.0"
},
"engines": {
"node": "10.* || >= 12.*"
},
"optionalDependencies": {
"@colors/colors": "1.5.0"
}
},
"node_modules/cliui": {
"version": "8.0.1",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
"integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
"dev": true,
"dependencies": {
"string-width": "^4.2.0",
"strip-ansi": "^6.0.1",
"wrap-ansi": "^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dev": true,
"dependencies": {
"color-name": "1.1.3"
}
},
"node_modules/color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
"dev": true
},
"node_modules/compare-func": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/compare-func/-/compare-func-2.0.0.tgz",
"integrity": "sha512-zHig5N+tPWARooBnb0Zx1MFcdfpyJrfTJ3Y5L+IFvUm8rM74hHz66z0gw0x4tijh5CorKkKUCnW82R2vmpeCRA==",
"dev": true,
"dependencies": {
"array-ify": "^1.0.0",
"dot-prop": "^5.1.0"
}
},
"node_modules/concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
"dev": true
},
"node_modules/config-chain": {
"version": "1.1.13",
"resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz",
"integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==",
"dev": true,
"dependencies": {
"ini": "^1.3.4",
"proto-list": "~1.2.1"
}
},
"node_modules/conventional-changelog-angular": {
"version": "5.0.13",
"resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-5.0.13.tgz",
"integrity": "sha512-i/gipMxs7s8L/QeuavPF2hLnJgH6pEZAttySB6aiQLWcX3puWDL3ACVmvBhJGxnAy52Qc15ua26BufY6KpmrVA==",
"dev": true,
"dependencies": {
"compare-func": "^2.0.0",
"q": "^1.5.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/conventional-changelog-writer": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/conventional-changelog-writer/-/conventional-changelog-writer-5.0.1.tgz",
"integrity": "sha512-5WsuKUfxW7suLblAbFnxAcrvf6r+0b7GvNaWUwUIk0bXMnENP/PEieGKVUQrjPqwPT4o3EPAASBXiY6iHooLOQ==",
"dev": true,
"dependencies": {
"conventional-commits-filter": "^2.0.7",
"dateformat": "^3.0.0",
"handlebars": "^4.7.7",
"json-stringify-safe": "^5.0.1",
"lodash": "^4.17.15",
"meow": "^8.0.0",
"semver": "^6.0.0",
"split": "^1.0.0",
"through2": "^4.0.0"
},
"bin": {
"conventional-changelog-writer": "cli.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/conventional-changelog-writer/node_modules/semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"dev": true,
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/conventional-commits-filter": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/conventional-commits-filter/-/conventional-commits-filter-2.0.7.tgz",
"integrity": "sha512-ASS9SamOP4TbCClsRHxIHXRfcGCnIoQqkvAzCSbZzTFLfcTqJVugB0agRgsEELsqaeWgsXv513eS116wnlSSPA==",
"dev": true,
"dependencies": {
"lodash.ismatch": "^4.4.0",
"modify-values": "^1.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/conventional-commits-parser": {
"version": "3.2.4",
"resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-3.2.4.tgz",
"integrity": "sha512-nK7sAtfi+QXbxHCYfhpZsfRtaitZLIA6889kFIouLvz6repszQDgxBu7wf2WbU+Dco7sAnNCJYERCwt54WPC2Q==",
"dev": true,
"dependencies": {
"is-text-path": "^1.0.1",
"JSONStream": "^1.0.4",
"lodash": "^4.17.15",
"meow": "^8.0.0",
"split2": "^3.0.0",
"through2": "^4.0.0"
},
"bin": {
"conventional-commits-parser": "cli.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/core-util-is": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
"integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==",
"dev": true
},
"node_modules/cosmiconfig": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.0.0.tgz",
"integrity": "sha512-da1EafcpH6b/TD8vDRaWV7xFINlHlF6zKsGwS1TsuVJTZRkquaS5HTMq7uq6h31619QjbsYl21gVDOm32KM1vQ==",
"dev": true,
"dependencies": {
"import-fresh": "^3.2.1",
"js-yaml": "^4.1.0",
"parse-json": "^5.0.0",
"path-type": "^4.0.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/cross-spawn": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
"integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
"dev": true,
"dependencies": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/crypto-random-string": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz",
"integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/dateformat": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/dateformat/-/dateformat-3.0.3.tgz",
"integrity": "sha512-jyCETtSl3VMZMWeRo7iY1FL19ges1t55hMo5yaam4Jrsm5EPL89UQkoQRyiI+Yf4k8r2ZpdngkV8hr1lIdjb3Q==",
"dev": true,
"engines": {
"node": "*"
}
},
"node_modules/debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dev": true,
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/decamelize": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
"integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/decamelize-keys": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.1.tgz",
"integrity": "sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==",
"dev": true,
"dependencies": {
"decamelize": "^1.1.0",
"map-obj": "^1.0.0"
},
"engines": {
"node": ">=0.10.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/decamelize-keys/node_modules/map-obj": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz",
"integrity": "sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/deep-extend": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
"integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
"dev": true,
"engines": {
"node": ">=4.0.0"
}
},
"node_modules/del": {
"version": "6.1.1",
"resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz",
"integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==",
"dev": true,
"dependencies": {
"globby": "^11.0.1",
"graceful-fs": "^4.2.4",
"is-glob": "^4.0.1",
"is-path-cwd": "^2.2.0",
"is-path-inside": "^3.0.2",
"p-map": "^4.0.0",
"rimraf": "^3.0.2",
"slash": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/del/node_modules/p-map": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz",
"integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==",
"dev": true,
"dependencies": {
"aggregate-error": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/deprecation": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz",
"integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==",
"dev": true
},
"node_modules/dir-glob": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
"integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
"dev": true,
"dependencies": {
"path-type": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/dot-prop": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz",
"integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==",
"dev": true,
"dependencies": {
"is-obj": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/duplexer2": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz",
"integrity": "sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA==",
"dev": true,
"dependencies": {
"readable-stream": "^2.0.2"
}
},
"node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"node_modules/env-ci": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/env-ci/-/env-ci-8.0.0.tgz",
"integrity": "sha512-W+3BqGZozFua9MPeXpmTm5eYEBtGgL76jGu/pwMVp/L8PdECSCEWaIp7d4Mw7kuUrbUldK0oV0bNd6ZZjLiMiA==",
"dev": true,
"dependencies": {
"execa": "^6.1.0",
"java-properties": "^1.0.2"
},
"engines": {
"node": "^16.10 || >=18"
}
},
"node_modules/env-ci/node_modules/execa": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/execa/-/execa-6.1.0.tgz",
"integrity": "sha512-QVWlX2e50heYJcCPG0iWtf8r0xjEYfz/OYLGDYH+IyjWezzPNxz63qNFOu0l4YftGWuizFVZHHs8PrLU5p2IDA==",
"dev": true,
"dependencies": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.1",
"human-signals": "^3.0.1",
"is-stream": "^3.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^5.1.0",
"onetime": "^6.0.0",
"signal-exit": "^3.0.7",
"strip-final-newline": "^3.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sindresorhus/execa?sponsor=1"
}
},
"node_modules/env-ci/node_modules/human-signals": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-3.0.1.tgz",
"integrity": "sha512-rQLskxnM/5OCldHo+wNXbpVgDn5A17CUoKX+7Sokwaknlq7CdSnphy0W39GU8dw59XiCXmFXDg4fRuckQRKewQ==",
"dev": true,
"engines": {
"node": ">=12.20.0"
}
},
"node_modules/env-ci/node_modules/is-stream": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
"integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
"dev": true,
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/env-ci/node_modules/mimic-fn": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
"integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/env-ci/node_modules/npm-run-path": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz",
"integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==",
"dev": true,
"dependencies": {
"path-key": "^4.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/env-ci/node_modules/onetime": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
"integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
"dev": true,
"dependencies": {
"mimic-fn": "^4.0.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/env-ci/node_modules/path-key": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
"integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/env-ci/node_modules/strip-final-newline": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
"integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/error-ex": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
"integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
"dev": true,
"dependencies": {
"is-arrayish": "^0.2.1"
}
},
"node_modules/escalade": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
"integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
"dev": true,
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/esprima": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
"integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
"dev": true,
"bin": {
"esparse": "bin/esparse.js",
"esvalidate": "bin/esvalidate.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/execa": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
"integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
"dev": true,
"dependencies": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.0",
"human-signals": "^2.1.0",
"is-stream": "^2.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^4.0.1",
"onetime": "^5.1.2",
"signal-exit": "^3.0.3",
"strip-final-newline": "^2.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sindresorhus/execa?sponsor=1"
}
},
"node_modules/fast-glob": {
"version": "3.2.12",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
"integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
"dev": true,
"dependencies": {
"@nodelib/fs.stat": "^2.0.2",
"@nodelib/fs.walk": "^1.2.3",
"glob-parent": "^5.1.2",
"merge2": "^1.3.0",
"micromatch": "^4.0.4"
},
"engines": {
"node": ">=8.6.0"
}
},
"node_modules/fastq": {
"version": "1.15.0",
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
"integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==",
"dev": true,
"dependencies": {
"reusify": "^1.0.4"
}
},
"node_modules/figures": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/figures/-/figures-5.0.0.tgz",
"integrity": "sha512-ej8ksPF4x6e5wvK9yevct0UCXh8TTFlWGVLlgjZuoBH1HwjIfKE/IdL5mq89sFA7zELi1VhKpmtDnrs7zWyeyg==",
"dev": true,
"dependencies": {
"escape-string-regexp": "^5.0.0",
"is-unicode-supported": "^1.2.0"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/figures/node_modules/escape-string-regexp": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
"integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/fill-range": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
"dev": true,
"dependencies": {
"to-regex-range": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/find-up": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz",
"integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==",
"dev": true,
"dependencies": {
"locate-path": "^7.1.0",
"path-exists": "^5.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/find-versions": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/find-versions/-/find-versions-5.1.0.tgz",
"integrity": "sha512-+iwzCJ7C5v5KgcBuueqVoNiHVoQpwiUK5XFLjf0affFTep+Wcw93tPvmb8tqujDNmzhBDPddnWV/qgWSXgq+Hg==",
"dev": true,
"dependencies": {
"semver-regex": "^4.0.5"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/from2": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
"integrity": "sha512-OMcX/4IC/uqEPVgGeyfN22LJk6AZrMkRZHxcHBMBvHScDGgwTm2GT2Wkgtocyd3JfZffjj2kYUDXXII0Fk9W0g==",
"dev": true,
"dependencies": {
"inherits": "^2.0.1",
"readable-stream": "^2.0.0"
}
},
"node_modules/fs-extra": {
"version": "11.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.1.0.tgz",
"integrity": "sha512-0rcTq621PD5jM/e0a3EJoGC/1TC5ZBCERW82LQuwfGnCa1V8w7dpYH1yNu+SLb6E5dkeCBzKEyLGlFrnr+dUyw==",
"dev": true,
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
},
"engines": {
"node": ">=14.14"
}
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"dev": true
},
"node_modules/function-bind": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==",
"dev": true
},
"node_modules/get-caller-file": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
"dev": true,
"engines": {
"node": "6.* || 8.* || >= 10.*"
}
},
"node_modules/get-stream": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
"integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/git-log-parser": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/git-log-parser/-/git-log-parser-1.2.0.tgz",
"integrity": "sha512-rnCVNfkTL8tdNryFuaY0fYiBWEBcgF748O6ZI61rslBvr2o7U65c2/6npCRqH40vuAhtgtDiqLTJjBVdrejCzA==",
"dev": true,
"dependencies": {
"argv-formatter": "~1.0.0",
"spawn-error-forwarder": "~1.0.0",
"split2": "~1.0.0",
"stream-combiner2": "~1.1.1",
"through2": "~2.0.0",
"traverse": "~0.6.6"
}
},
"node_modules/git-log-parser/node_modules/split2": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/split2/-/split2-1.0.0.tgz",
"integrity": "sha512-NKywug4u4pX/AZBB1FCPzZ6/7O+Xhz1qMVbzTvvKvikjO99oPN87SkK08mEY9P63/5lWjK+wgOOgApnTg5r6qg==",
"dev": true,
"dependencies": {
"through2": "~2.0.0"
}
},
"node_modules/git-log-parser/node_modules/through2": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
"integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
"dev": true,
"dependencies": {
"readable-stream": "~2.3.6",
"xtend": "~4.0.1"
}
},
"node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"dev": true,
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/globby": {
"version": "11.1.0",
"resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
"integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
"dev": true,
"dependencies": {
"array-union": "^2.1.0",
"dir-glob": "^3.0.1",
"fast-glob": "^3.2.9",
"ignore": "^5.2.0",
"merge2": "^1.4.1",
"slash": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/graceful-fs": {
"version": "4.2.10",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz",
"integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==",
"dev": true
},
"node_modules/handlebars": {
"version": "4.7.7",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz",
"integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==",
"dev": true,
"dependencies": {
"minimist": "^1.2.5",
"neo-async": "^2.6.0",
"source-map": "^0.6.1",
"wordwrap": "^1.0.0"
},
"bin": {
"handlebars": "bin/handlebars"
},
"engines": {
"node": ">=0.4.7"
},
"optionalDependencies": {
"uglify-js": "^3.1.4"
}
},
"node_modules/hard-rejection": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz",
"integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/has": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
"integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
"dev": true,
"dependencies": {
"function-bind": "^1.1.1"
},
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/hook-std": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/hook-std/-/hook-std-3.0.0.tgz",
"integrity": "sha512-jHRQzjSDzMtFy34AGj1DN+vq54WVuhSvKgrHf0OMiFQTwDD4L/qqofVEWjLOBMTn5+lCD3fPg32W9yOfnEJTTw==",
"dev": true,
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/hosted-git-info": {
"version": "6.1.1",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-6.1.1.tgz",
"integrity": "sha512-r0EI+HBMcXadMrugk0GCQ+6BQV39PiWAZVfq7oIckeGiN7sjRGyQxPdft3nQekFTCQbYxLBH+/axZMeH8UX6+w==",
"dev": true,
"dependencies": {
"lru-cache": "^7.5.1"
},
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/http-proxy-agent": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz",
"integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==",
"dev": true,
"dependencies": {
"@tootallnate/once": "2",
"agent-base": "6",
"debug": "4"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/https-proxy-agent": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
"integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
"dev": true,
"dependencies": {
"agent-base": "6",
"debug": "4"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/human-signals": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
"integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
"dev": true,
"engines": {
"node": ">=10.17.0"
}
},
"node_modules/ignore": {
"version": "5.2.4",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
"integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==",
"dev": true,
"engines": {
"node": ">= 4"
}
},
"node_modules/import-fresh": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
"integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
"dev": true,
"dependencies": {
"parent-module": "^1.0.0",
"resolve-from": "^4.0.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/import-fresh/node_modules/resolve-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/import-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/import-from/-/import-from-4.0.0.tgz",
"integrity": "sha512-P9J71vT5nLlDeV8FHs5nNxaLbrpfAV5cF5srvbZfpwpcJoM/xZR3hiv+q+SAnuSmuGbXMWud063iIMx/V/EWZQ==",
"dev": true,
"engines": {
"node": ">=12.2"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/indent-string": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
"integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"dev": true,
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true
},
"node_modules/ini": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
"dev": true
},
"node_modules/into-stream": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/into-stream/-/into-stream-6.0.0.tgz",
"integrity": "sha512-XHbaOAvP+uFKUFsOgoNPRjLkwB+I22JFPFe5OjTkQ0nwgj6+pSjb4NmB6VMxaPshLiOf+zcpOCBQuLwC1KHhZA==",
"dev": true,
"dependencies": {
"from2": "^2.3.0",
"p-is-promise": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-arrayish": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
"integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==",
"dev": true
},
"node_modules/is-core-module": {
"version": "2.11.0",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz",
"integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==",
"dev": true,
"dependencies": {
"has": "^1.0.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/is-glob": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
"dev": true,
"dependencies": {
"is-extglob": "^2.1.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true,
"engines": {
"node": ">=0.12.0"
}
},
"node_modules/is-obj": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz",
"integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/is-path-cwd": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz",
"integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/is-path-inside": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
"integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/is-plain-obj": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz",
"integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-plain-object": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz",
"integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-stream": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
"integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
"dev": true,
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-text-path": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/is-text-path/-/is-text-path-1.0.1.tgz",
"integrity": "sha512-xFuJpne9oFz5qDaodwmmG08e3CawH/2ZV8Qqza1Ko7Sk8POWbkRdwIoAWVhqvq0XeUzANEhKo2n0IXUGBm7A/w==",
"dev": true,
"dependencies": {
"text-extensions": "^1.0.0"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-unicode-supported": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-1.3.0.tgz",
"integrity": "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==",
"dev": true
},
"node_modules/isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
"dev": true
},
"node_modules/issue-parser": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/issue-parser/-/issue-parser-6.0.0.tgz",
"integrity": "sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA==",
"dev": true,
"dependencies": {
"lodash.capitalize": "^4.2.1",
"lodash.escaperegexp": "^4.1.2",
"lodash.isplainobject": "^4.0.6",
"lodash.isstring": "^4.0.1",
"lodash.uniqby": "^4.7.0"
},
"engines": {
"node": ">=10.13"
}
},
"node_modules/java-properties": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/java-properties/-/java-properties-1.0.2.tgz",
"integrity": "sha512-qjdpeo2yKlYTH7nFdK0vbZWuTCesk4o63v5iVOlhMQPfuIZQfW/HI35SjfhA+4qpg36rnFSvUK5b1m+ckIblQQ==",
"dev": true,
"engines": {
"node": ">= 0.6.0"
}
},
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
"dev": true
},
"node_modules/js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dev": true,
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/json-parse-better-errors": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz",
"integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==",
"dev": true
},
"node_modules/json-parse-even-better-errors": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
"integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
"dev": true
},
"node_modules/json-stringify-safe": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
"integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==",
"dev": true
},
"node_modules/jsonfile": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
"integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
"dev": true,
"dependencies": {
"universalify": "^2.0.0"
},
"optionalDependencies": {
"graceful-fs": "^4.1.6"
}
},
"node_modules/jsonparse": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz",
"integrity": "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==",
"dev": true,
"engines": [
"node >= 0.2.0"
]
},
"node_modules/JSONStream": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz",
"integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==",
"dev": true,
"dependencies": {
"jsonparse": "^1.2.0",
"through": ">=2.2.7 <3"
},
"bin": {
"JSONStream": "bin.js"
},
"engines": {
"node": "*"
}
},
"node_modules/kind-of": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
"integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/lines-and-columns": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
"integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
"dev": true
},
"node_modules/load-json-file": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz",
"integrity": "sha512-Kx8hMakjX03tiGTLAIdJ+lL0htKnXjEZN6hk/tozf/WOuYGdZBJrZ+rCJRbVCugsjB3jMLn9746NsQIf5VjBMw==",
"dev": true,
"dependencies": {
"graceful-fs": "^4.1.2",
"parse-json": "^4.0.0",
"pify": "^3.0.0",
"strip-bom": "^3.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/load-json-file/node_modules/parse-json": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz",
"integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==",
"dev": true,
"dependencies": {
"error-ex": "^1.3.1",
"json-parse-better-errors": "^1.0.1"
},
"engines": {
"node": ">=4"
}
},
"node_modules/locate-path": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz",
"integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==",
"dev": true,
"dependencies": {
"p-locate": "^6.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"dev": true
},
"node_modules/lodash-es": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
"integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==",
"dev": true
},
"node_modules/lodash.capitalize": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/lodash.capitalize/-/lodash.capitalize-4.2.1.tgz",
"integrity": "sha512-kZzYOKspf8XVX5AvmQF94gQW0lejFVgb80G85bU4ZWzoJ6C03PQg3coYAUpSTpQWelrZELd3XWgHzw4Ck5kaIw==",
"dev": true
},
"node_modules/lodash.escaperegexp": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz",
"integrity": "sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw==",
"dev": true
},
"node_modules/lodash.ismatch": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/lodash.ismatch/-/lodash.ismatch-4.4.0.tgz",
"integrity": "sha512-fPMfXjGQEV9Xsq/8MTSgUf255gawYRbjwMyDbcvDhXgV7enSZA0hynz6vMPnpAb5iONEzBHBPsT+0zes5Z301g==",
"dev": true
},
"node_modules/lodash.isplainobject": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
"integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==",
"dev": true
},
"node_modules/lodash.isstring": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz",
"integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==",
"dev": true
},
"node_modules/lodash.uniqby": {
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz",
"integrity": "sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww==",
"dev": true
},
"node_modules/lru-cache": {
"version": "7.17.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.17.0.tgz",
"integrity": "sha512-zSxlVVwOabhVyTi6E8gYv2cr6bXK+8ifYz5/uyJb9feXX6NACVDwY4p5Ut3WC3Ivo/QhpARHU3iujx2xGAYHbQ==",
"dev": true,
"engines": {
"node": ">=12"
}
},
"node_modules/map-obj": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz",
"integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==",
"dev": true,
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/marked": {
"version": "4.2.12",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.2.12.tgz",
"integrity": "sha512-yr8hSKa3Fv4D3jdZmtMMPghgVt6TWbk86WQaWhDloQjRSQhMMYCAro7jP7VDJrjjdV8pxVxMssXS8B8Y5DZ5aw==",
"dev": true,
"bin": {
"marked": "bin/marked.js"
},
"engines": {
"node": ">= 12"
}
},
"node_modules/marked-terminal": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-5.1.1.tgz",
"integrity": "sha512-+cKTOx9P4l7HwINYhzbrBSyzgxO2HaHKGZGuB1orZsMIgXYaJyfidT81VXRdpelW/PcHEWxywscePVgI/oUF6g==",
"dev": true,
"dependencies": {
"ansi-escapes": "^5.0.0",
"cardinal": "^2.1.1",
"chalk": "^5.0.0",
"cli-table3": "^0.6.1",
"node-emoji": "^1.11.0",
"supports-hyperlinks": "^2.2.0"
},
"engines": {
"node": ">=14.13.1 || >=16.0.0"
},
"peerDependencies": {
"marked": "^1.0.0 || ^2.0.0 || ^3.0.0 || ^4.0.0"
}
},
"node_modules/marked-terminal/node_modules/chalk": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-5.2.0.tgz",
"integrity": "sha512-ree3Gqw/nazQAPuJJEy+avdl7QfZMcUvmHIKgEZkGL+xOBzRvup5Hxo6LHuMceSxOabuJLJm5Yp/92R9eMmMvA==",
"dev": true,
"engines": {
"node": "^12.17.0 || ^14.13 || >=16.0.0"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/meow": {
"version": "8.1.2",
"resolved": "https://registry.npmjs.org/meow/-/meow-8.1.2.tgz",
"integrity": "sha512-r85E3NdZ+mpYk1C6RjPFEMSE+s1iZMuHtsHAqY0DT3jZczl0diWUZ8g6oU7h0M9cD2EL+PzaYghhCLzR0ZNn5Q==",
"dev": true,
"dependencies": {
"@types/minimist": "^1.2.0",
"camelcase-keys": "^6.2.2",
"decamelize-keys": "^1.1.0",
"hard-rejection": "^2.1.0",
"minimist-options": "4.1.0",
"normalize-package-data": "^3.0.0",
"read-pkg-up": "^7.0.1",
"redent": "^3.0.0",
"trim-newlines": "^3.0.0",
"type-fest": "^0.18.0",
"yargs-parser": "^20.2.3"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/meow/node_modules/find-up": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dev": true,
"dependencies": {
"locate-path": "^5.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/meow/node_modules/locate-path": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dev": true,
"dependencies": {
"p-locate": "^4.1.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/meow/node_modules/p-limit": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dev": true,
"dependencies": {
"p-try": "^2.0.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/meow/node_modules/p-locate": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dev": true,
"dependencies": {
"p-limit": "^2.2.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/meow/node_modules/path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/meow/node_modules/read-pkg-up": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
"integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
"dev": true,
"dependencies": {
"find-up": "^4.1.0",
"read-pkg": "^5.2.0",
"type-fest": "^0.8.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/meow/node_modules/read-pkg-up/node_modules/type-fest": {
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
"integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/meow/node_modules/type-fest": {
"version": "0.18.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz",
"integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
"integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
"dev": true
},
"node_modules/merge2": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
"integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
"dev": true,
"engines": {
"node": ">= 8"
}
},
"node_modules/micromatch": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
"integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
"dev": true,
"dependencies": {
"braces": "^3.0.2",
"picomatch": "^2.3.1"
},
"engines": {
"node": ">=8.6"
}
},
"node_modules/mime": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-3.0.0.tgz",
"integrity": "sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==",
"dev": true,
"bin": {
"mime": "cli.js"
},
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/mimic-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/min-indent": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz",
"integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/minimatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"dev": true,
"dependencies": {
"brace-expansion": "^1.1.7"
},
"engines": {
"node": "*"
}
},
"node_modules/minimist": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
"integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/minimist-options": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz",
"integrity": "sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==",
"dev": true,
"dependencies": {
"arrify": "^1.0.1",
"is-plain-obj": "^1.1.0",
"kind-of": "^6.0.3"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/modify-values": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/modify-values/-/modify-values-1.0.1.tgz",
"integrity": "sha512-xV2bxeN6F7oYjZWTe/YPAy6MN2M+sL4u/Rlm2AHCIVGfo2p1yGmBHQ6vHehl4bRTZBdHu3TSkWdYgkwpYzAGSw==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
"dev": true
},
"node_modules/neo-async": {
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
"integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==",
"dev": true
},
"node_modules/nerf-dart": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/nerf-dart/-/nerf-dart-1.0.0.tgz",
"integrity": "sha512-EZSPZB70jiVsivaBLYDCyntd5eH8NTSMOn3rB+HxwdmKThGELLdYv8qVIMWvZEFy9w8ZZpW9h9OB32l1rGtj7g==",
"dev": true
},
"node_modules/node-emoji": {
"version": "1.11.0",
"resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz",
"integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==",
"dev": true,
"dependencies": {
"lodash": "^4.17.21"
}
},
"node_modules/node-fetch": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz",
"integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==",
"dev": true,
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/normalize-package-data": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.3.tgz",
"integrity": "sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==",
"dev": true,
"dependencies": {
"hosted-git-info": "^4.0.1",
"is-core-module": "^2.5.0",
"semver": "^7.3.4",
"validate-npm-package-license": "^3.0.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/normalize-package-data/node_modules/hosted-git-info": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz",
"integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/normalize-package-data/node_modules/lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/normalize-url": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz",
"integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/npm": {
"version": "8.19.4",
"resolved": "https://registry.npmjs.org/npm/-/npm-8.19.4.tgz",
"integrity": "sha512-3HANl8i9DKnUA89P4KEgVNN28EjSeDCmvEqbzOAuxCFDzdBZzjUl99zgnGpOUumvW5lvJo2HKcjrsc+tfyv1Hw==",
"bundleDependencies": [
"@isaacs/string-locale-compare",
"@npmcli/arborist",
"@npmcli/ci-detect",
"@npmcli/config",
"@npmcli/fs",
"@npmcli/map-workspaces",
"@npmcli/package-json",
"@npmcli/run-script",
"abbrev",
"archy",
"cacache",
"chalk",
"chownr",
"cli-columns",
"cli-table3",
"columnify",
"fastest-levenshtein",
"fs-minipass",
"glob",
"graceful-fs",
"hosted-git-info",
"ini",
"init-package-json",
"is-cidr",
"json-parse-even-better-errors",
"libnpmaccess",
"libnpmdiff",
"libnpmexec",
"libnpmfund",
"libnpmhook",
"libnpmorg",
"libnpmpack",
"libnpmpublish",
"libnpmsearch",
"libnpmteam",
"libnpmversion",
"make-fetch-happen",
"minimatch",
"minipass",
"minipass-pipeline",
"mkdirp",
"mkdirp-infer-owner",
"ms",
"node-gyp",
"nopt",
"npm-audit-report",
"npm-install-checks",
"npm-package-arg",
"npm-pick-manifest",
"npm-profile",
"npm-registry-fetch",
"npm-user-validate",
"npmlog",
"opener",
"p-map",
"pacote",
"parse-conflict-json",
"proc-log",
"qrcode-terminal",
"read",
"read-package-json",
"read-package-json-fast",
"readdir-scoped-modules",
"rimraf",
"semver",
"ssri",
"tar",
"text-table",
"tiny-relative-date",
"treeverse",
"validate-npm-package-name",
"which",
"write-file-atomic"
],
"dev": true,
"dependencies": {
"@isaacs/string-locale-compare": "^1.1.0",
"@npmcli/arborist": "^5.6.3",
"@npmcli/ci-detect": "^2.0.0",
"@npmcli/config": "^4.2.1",
"@npmcli/fs": "^2.1.0",
"@npmcli/map-workspaces": "^2.0.3",
"@npmcli/package-json": "^2.0.0",
"@npmcli/run-script": "^4.2.1",
"abbrev": "~1.1.1",
"archy": "~1.0.0",
"cacache": "^16.1.3",
"chalk": "^4.1.2",
"chownr": "^2.0.0",
"cli-columns": "^4.0.0",
"cli-table3": "^0.6.2",
"columnify": "^1.6.0",
"fastest-levenshtein": "^1.0.12",
"fs-minipass": "^2.1.0",
"glob": "^8.0.1",
"graceful-fs": "^4.2.10",
"hosted-git-info": "^5.2.1",
"ini": "^3.0.1",
"init-package-json": "^3.0.2",
"is-cidr": "^4.0.2",
"json-parse-even-better-errors": "^2.3.1",
"libnpmaccess": "^6.0.4",
"libnpmdiff": "^4.0.5",
"libnpmexec": "^4.0.14",
"libnpmfund": "^3.0.5",
"libnpmhook": "^8.0.4",
"libnpmorg": "^4.0.4",
"libnpmpack": "^4.1.3",
"libnpmpublish": "^6.0.5",
"libnpmsearch": "^5.0.4",
"libnpmteam": "^4.0.4",
"libnpmversion": "^3.0.7",
"make-fetch-happen": "^10.2.0",
"minimatch": "^5.1.0",
"minipass": "^3.1.6",
"minipass-pipeline": "^1.2.4",
"mkdirp": "^1.0.4",
"mkdirp-infer-owner": "^2.0.0",
"ms": "^2.1.2",
"node-gyp": "^9.1.0",
"nopt": "^6.0.0",
"npm-audit-report": "^3.0.0",
"npm-install-checks": "^5.0.0",
"npm-package-arg": "^9.1.0",
"npm-pick-manifest": "^7.0.2",
"npm-profile": "^6.2.0",
"npm-registry-fetch": "^13.3.1",
"npm-user-validate": "^1.0.1",
"npmlog": "^6.0.2",
"opener": "^1.5.2",
"p-map": "^4.0.0",
"pacote": "^13.6.2",
"parse-conflict-json": "^2.0.2",
"proc-log": "^2.0.1",
"qrcode-terminal": "^0.12.0",
"read": "~1.0.7",
"read-package-json": "^5.0.2",
"read-package-json-fast": "^2.0.3",
"readdir-scoped-modules": "^1.1.0",
"rimraf": "^3.0.2",
"semver": "^7.3.7",
"ssri": "^9.0.1",
"tar": "^6.1.11",
"text-table": "~0.2.0",
"tiny-relative-date": "^1.3.0",
"treeverse": "^2.0.0",
"validate-npm-package-name": "^4.0.0",
"which": "^2.0.2",
"write-file-atomic": "^4.0.1"
},
"bin": {
"npm": "bin/npm-cli.js",
"npx": "bin/npx-cli.js"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm-run-path": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
"integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
"dev": true,
"dependencies": {
"path-key": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/@colors/colors": {
"version": "1.5.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"optional": true,
"engines": {
"node": ">=0.1.90"
}
},
"node_modules/npm/node_modules/@gar/promisify": {
"version": "1.1.3",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/@isaacs/string-locale-compare": {
"version": "1.1.0",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/@npmcli/arborist": {
"version": "5.6.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@isaacs/string-locale-compare": "^1.1.0",
"@npmcli/installed-package-contents": "^1.0.7",
"@npmcli/map-workspaces": "^2.0.3",
"@npmcli/metavuln-calculator": "^3.0.1",
"@npmcli/move-file": "^2.0.0",
"@npmcli/name-from-folder": "^1.0.1",
"@npmcli/node-gyp": "^2.0.0",
"@npmcli/package-json": "^2.0.0",
"@npmcli/query": "^1.2.0",
"@npmcli/run-script": "^4.1.3",
"bin-links": "^3.0.3",
"cacache": "^16.1.3",
"common-ancestor-path": "^1.0.1",
"hosted-git-info": "^5.2.1",
"json-parse-even-better-errors": "^2.3.1",
"json-stringify-nice": "^1.1.4",
"minimatch": "^5.1.0",
"mkdirp": "^1.0.4",
"mkdirp-infer-owner": "^2.0.0",
"nopt": "^6.0.0",
"npm-install-checks": "^5.0.0",
"npm-package-arg": "^9.0.0",
"npm-pick-manifest": "^7.0.2",
"npm-registry-fetch": "^13.0.0",
"npmlog": "^6.0.2",
"pacote": "^13.6.1",
"parse-conflict-json": "^2.0.1",
"proc-log": "^2.0.0",
"promise-all-reject-late": "^1.0.0",
"promise-call-limit": "^1.0.1",
"read-package-json-fast": "^2.0.2",
"readdir-scoped-modules": "^1.1.0",
"rimraf": "^3.0.2",
"semver": "^7.3.7",
"ssri": "^9.0.0",
"treeverse": "^2.0.0",
"walk-up-path": "^1.0.0"
},
"bin": {
"arborist": "bin/index.js"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/ci-detect": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16"
}
},
"node_modules/npm/node_modules/@npmcli/config": {
"version": "4.2.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/map-workspaces": "^2.0.2",
"ini": "^3.0.0",
"mkdirp-infer-owner": "^2.0.0",
"nopt": "^6.0.0",
"proc-log": "^2.0.0",
"read-package-json-fast": "^2.0.3",
"semver": "^7.3.5",
"walk-up-path": "^1.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/disparity-colors": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"ansi-styles": "^4.3.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/fs": {
"version": "2.1.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@gar/promisify": "^1.1.3",
"semver": "^7.3.5"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/git": {
"version": "3.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/promise-spawn": "^3.0.0",
"lru-cache": "^7.4.4",
"mkdirp": "^1.0.4",
"npm-pick-manifest": "^7.0.0",
"proc-log": "^2.0.0",
"promise-inflight": "^1.0.1",
"promise-retry": "^2.0.1",
"semver": "^7.3.5",
"which": "^2.0.2"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/installed-package-contents": {
"version": "1.0.7",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"npm-bundled": "^1.1.1",
"npm-normalize-package-bin": "^1.0.1"
},
"bin": {
"installed-package-contents": "index.js"
},
"engines": {
"node": ">= 10"
}
},
"node_modules/npm/node_modules/@npmcli/installed-package-contents/node_modules/npm-bundled": {
"version": "1.1.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"npm-normalize-package-bin": "^1.0.1"
}
},
"node_modules/npm/node_modules/@npmcli/map-workspaces": {
"version": "2.0.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/name-from-folder": "^1.0.1",
"glob": "^8.0.1",
"minimatch": "^5.0.1",
"read-package-json-fast": "^2.0.3"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/metavuln-calculator": {
"version": "3.1.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"cacache": "^16.0.0",
"json-parse-even-better-errors": "^2.3.1",
"pacote": "^13.0.3",
"semver": "^7.3.5"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/move-file": {
"version": "2.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"mkdirp": "^1.0.4",
"rimraf": "^3.0.2"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/name-from-folder": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/@npmcli/node-gyp": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/package-json": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"json-parse-even-better-errors": "^2.3.1"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/promise-spawn": {
"version": "3.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"infer-owner": "^1.0.4"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/query": {
"version": "1.2.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"npm-package-arg": "^9.1.0",
"postcss-selector-parser": "^6.0.10",
"semver": "^7.3.7"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@npmcli/run-script": {
"version": "4.2.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/node-gyp": "^2.0.0",
"@npmcli/promise-spawn": "^3.0.0",
"node-gyp": "^9.0.0",
"read-package-json-fast": "^2.0.3",
"which": "^2.0.2"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/@tootallnate/once": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">= 10"
}
},
"node_modules/npm/node_modules/abbrev": {
"version": "1.1.1",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/agent-base": {
"version": "6.0.2",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"debug": "4"
},
"engines": {
"node": ">= 6.0.0"
}
},
"node_modules/npm/node_modules/agentkeepalive": {
"version": "4.2.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"debug": "^4.1.0",
"depd": "^1.1.2",
"humanize-ms": "^1.2.1"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/npm/node_modules/aggregate-error": {
"version": "3.1.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"clean-stack": "^2.0.0",
"indent-string": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/ansi-regex": {
"version": "5.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/ansi-styles": {
"version": "4.3.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/npm/node_modules/aproba": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/archy": {
"version": "1.0.0",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/are-we-there-yet": {
"version": "3.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"delegates": "^1.0.0",
"readable-stream": "^3.6.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/asap": {
"version": "2.0.6",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/balanced-match": {
"version": "1.0.2",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/bin-links": {
"version": "3.0.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"cmd-shim": "^5.0.0",
"mkdirp-infer-owner": "^2.0.0",
"npm-normalize-package-bin": "^2.0.0",
"read-cmd-shim": "^3.0.0",
"rimraf": "^3.0.0",
"write-file-atomic": "^4.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/bin-links/node_modules/npm-normalize-package-bin": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/binary-extensions": {
"version": "2.2.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/brace-expansion": {
"version": "2.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/npm/node_modules/builtins": {
"version": "5.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"semver": "^7.0.0"
}
},
"node_modules/npm/node_modules/cacache": {
"version": "16.1.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/fs": "^2.1.0",
"@npmcli/move-file": "^2.0.0",
"chownr": "^2.0.0",
"fs-minipass": "^2.1.0",
"glob": "^8.0.1",
"infer-owner": "^1.0.4",
"lru-cache": "^7.7.1",
"minipass": "^3.1.6",
"minipass-collect": "^1.0.2",
"minipass-flush": "^1.0.5",
"minipass-pipeline": "^1.2.4",
"mkdirp": "^1.0.4",
"p-map": "^4.0.0",
"promise-inflight": "^1.0.1",
"rimraf": "^3.0.2",
"ssri": "^9.0.0",
"tar": "^6.1.11",
"unique-filename": "^2.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/chalk": {
"version": "4.1.2",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/npm/node_modules/chownr": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/cidr-regex": {
"version": "3.1.1",
"dev": true,
"inBundle": true,
"license": "BSD-2-Clause",
"dependencies": {
"ip-regex": "^4.1.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/clean-stack": {
"version": "2.2.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/npm/node_modules/cli-columns": {
"version": "4.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"string-width": "^4.2.3",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">= 10"
}
},
"node_modules/npm/node_modules/cli-table3": {
"version": "0.6.2",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"string-width": "^4.2.0"
},
"engines": {
"node": "10.* || >= 12.*"
},
"optionalDependencies": {
"@colors/colors": "1.5.0"
}
},
"node_modules/npm/node_modules/clone": {
"version": "1.0.4",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=0.8"
}
},
"node_modules/npm/node_modules/cmd-shim": {
"version": "5.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"mkdirp-infer-owner": "^2.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/color-convert": {
"version": "2.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/npm/node_modules/color-name": {
"version": "1.1.4",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/color-support": {
"version": "1.1.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"bin": {
"color-support": "bin.js"
}
},
"node_modules/npm/node_modules/columnify": {
"version": "1.6.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"strip-ansi": "^6.0.1",
"wcwidth": "^1.0.0"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/npm/node_modules/common-ancestor-path": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/concat-map": {
"version": "0.0.1",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/console-control-strings": {
"version": "1.1.0",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/cssesc": {
"version": "3.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"bin": {
"cssesc": "bin/cssesc"
},
"engines": {
"node": ">=4"
}
},
"node_modules/npm/node_modules/debug": {
"version": "4.3.4",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/npm/node_modules/debug/node_modules/ms": {
"version": "2.1.2",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/debuglog": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": "*"
}
},
"node_modules/npm/node_modules/defaults": {
"version": "1.0.3",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"clone": "^1.0.2"
}
},
"node_modules/npm/node_modules/delegates": {
"version": "1.0.0",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/depd": {
"version": "1.1.2",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/npm/node_modules/dezalgo": {
"version": "1.0.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"asap": "^2.0.0",
"wrappy": "1"
}
},
"node_modules/npm/node_modules/diff": {
"version": "5.1.0",
"dev": true,
"inBundle": true,
"license": "BSD-3-Clause",
"engines": {
"node": ">=0.3.1"
}
},
"node_modules/npm/node_modules/emoji-regex": {
"version": "8.0.0",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/encoding": {
"version": "0.1.13",
"dev": true,
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
"iconv-lite": "^0.6.2"
}
},
"node_modules/npm/node_modules/env-paths": {
"version": "2.2.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/npm/node_modules/err-code": {
"version": "2.0.3",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/fastest-levenshtein": {
"version": "1.0.12",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/fs-minipass": {
"version": "2.1.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"minipass": "^3.0.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/npm/node_modules/fs.realpath": {
"version": "1.0.0",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/function-bind": {
"version": "1.1.1",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/gauge": {
"version": "4.0.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"aproba": "^1.0.3 || ^2.0.0",
"color-support": "^1.1.3",
"console-control-strings": "^1.1.0",
"has-unicode": "^2.0.1",
"signal-exit": "^3.0.7",
"string-width": "^4.2.3",
"strip-ansi": "^6.0.1",
"wide-align": "^1.1.5"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/glob": {
"version": "8.0.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^5.0.1",
"once": "^1.3.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/npm/node_modules/graceful-fs": {
"version": "4.2.10",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/has": {
"version": "1.0.3",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"function-bind": "^1.1.1"
},
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/npm/node_modules/has-flag": {
"version": "4.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/has-unicode": {
"version": "2.0.1",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/hosted-git-info": {
"version": "5.2.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"lru-cache": "^7.5.1"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/http-cache-semantics": {
"version": "4.1.1",
"dev": true,
"inBundle": true,
"license": "BSD-2-Clause"
},
"node_modules/npm/node_modules/http-proxy-agent": {
"version": "5.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"@tootallnate/once": "2",
"agent-base": "6",
"debug": "4"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/npm/node_modules/https-proxy-agent": {
"version": "5.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"agent-base": "6",
"debug": "4"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/npm/node_modules/humanize-ms": {
"version": "1.2.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"ms": "^2.0.0"
}
},
"node_modules/npm/node_modules/iconv-lite": {
"version": "0.6.3",
"dev": true,
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3.0.0"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/npm/node_modules/ignore-walk": {
"version": "5.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"minimatch": "^5.0.1"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/imurmurhash": {
"version": "0.1.4",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=0.8.19"
}
},
"node_modules/npm/node_modules/indent-string": {
"version": "4.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/infer-owner": {
"version": "1.0.4",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/inflight": {
"version": "1.0.6",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/npm/node_modules/inherits": {
"version": "2.0.4",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/ini": {
"version": "3.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/init-package-json": {
"version": "3.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"npm-package-arg": "^9.0.1",
"promzard": "^0.3.0",
"read": "^1.0.7",
"read-package-json": "^5.0.0",
"semver": "^7.3.5",
"validate-npm-package-license": "^3.0.4",
"validate-npm-package-name": "^4.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/ip": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/ip-regex": {
"version": "4.3.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/is-cidr": {
"version": "4.0.2",
"dev": true,
"inBundle": true,
"license": "BSD-2-Clause",
"dependencies": {
"cidr-regex": "^3.1.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/is-core-module": {
"version": "2.10.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"has": "^1.0.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/npm/node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/is-lambda": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/isexe": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/json-parse-even-better-errors": {
"version": "2.3.1",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/json-stringify-nice": {
"version": "1.1.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/npm/node_modules/jsonparse": {
"version": "1.3.1",
"dev": true,
"engines": [
"node >= 0.2.0"
],
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/just-diff": {
"version": "5.1.1",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/just-diff-apply": {
"version": "5.4.1",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/libnpmaccess": {
"version": "6.0.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"aproba": "^2.0.0",
"minipass": "^3.1.1",
"npm-package-arg": "^9.0.1",
"npm-registry-fetch": "^13.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmdiff": {
"version": "4.0.5",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/disparity-colors": "^2.0.0",
"@npmcli/installed-package-contents": "^1.0.7",
"binary-extensions": "^2.2.0",
"diff": "^5.1.0",
"minimatch": "^5.0.1",
"npm-package-arg": "^9.0.1",
"pacote": "^13.6.1",
"tar": "^6.1.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmexec": {
"version": "4.0.14",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/arborist": "^5.6.3",
"@npmcli/ci-detect": "^2.0.0",
"@npmcli/fs": "^2.1.1",
"@npmcli/run-script": "^4.2.0",
"chalk": "^4.1.0",
"mkdirp-infer-owner": "^2.0.0",
"npm-package-arg": "^9.0.1",
"npmlog": "^6.0.2",
"pacote": "^13.6.1",
"proc-log": "^2.0.0",
"read": "^1.0.7",
"read-package-json-fast": "^2.0.2",
"semver": "^7.3.7",
"walk-up-path": "^1.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmfund": {
"version": "3.0.5",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/arborist": "^5.6.3"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmhook": {
"version": "8.0.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"aproba": "^2.0.0",
"npm-registry-fetch": "^13.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmorg": {
"version": "4.0.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"aproba": "^2.0.0",
"npm-registry-fetch": "^13.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmpack": {
"version": "4.1.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/run-script": "^4.1.3",
"npm-package-arg": "^9.0.1",
"pacote": "^13.6.1"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmpublish": {
"version": "6.0.5",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"normalize-package-data": "^4.0.0",
"npm-package-arg": "^9.0.1",
"npm-registry-fetch": "^13.0.0",
"semver": "^7.3.7",
"ssri": "^9.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmsearch": {
"version": "5.0.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"npm-registry-fetch": "^13.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmteam": {
"version": "4.0.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"aproba": "^2.0.0",
"npm-registry-fetch": "^13.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/libnpmversion": {
"version": "3.0.7",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/git": "^3.0.0",
"@npmcli/run-script": "^4.1.3",
"json-parse-even-better-errors": "^2.3.1",
"proc-log": "^2.0.0",
"semver": "^7.3.7"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/lru-cache": {
"version": "7.13.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/npm/node_modules/make-fetch-happen": {
"version": "10.2.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"agentkeepalive": "^4.2.1",
"cacache": "^16.1.0",
"http-cache-semantics": "^4.1.0",
"http-proxy-agent": "^5.0.0",
"https-proxy-agent": "^5.0.0",
"is-lambda": "^1.0.1",
"lru-cache": "^7.7.1",
"minipass": "^3.1.6",
"minipass-collect": "^1.0.2",
"minipass-fetch": "^2.0.3",
"minipass-flush": "^1.0.5",
"minipass-pipeline": "^1.2.4",
"negotiator": "^0.6.3",
"promise-retry": "^2.0.1",
"socks-proxy-agent": "^7.0.0",
"ssri": "^9.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/minimatch": {
"version": "5.1.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/minipass": {
"version": "3.3.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/minipass-collect": {
"version": "1.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"minipass": "^3.0.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/npm/node_modules/minipass-fetch": {
"version": "2.1.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"minipass": "^3.1.6",
"minipass-sized": "^1.0.3",
"minizlib": "^2.1.2"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
},
"optionalDependencies": {
"encoding": "^0.1.13"
}
},
"node_modules/npm/node_modules/minipass-flush": {
"version": "1.0.5",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"minipass": "^3.0.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/npm/node_modules/minipass-json-stream": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"jsonparse": "^1.3.1",
"minipass": "^3.0.0"
}
},
"node_modules/npm/node_modules/minipass-pipeline": {
"version": "1.2.4",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"minipass": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/minipass-sized": {
"version": "1.0.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"minipass": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/minizlib": {
"version": "2.1.2",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"minipass": "^3.0.0",
"yallist": "^4.0.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/npm/node_modules/mkdirp": {
"version": "1.0.4",
"dev": true,
"inBundle": true,
"license": "MIT",
"bin": {
"mkdirp": "bin/cmd.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/mkdirp-infer-owner": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"chownr": "^2.0.0",
"infer-owner": "^1.0.4",
"mkdirp": "^1.0.3"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/ms": {
"version": "2.1.3",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/mute-stream": {
"version": "0.0.8",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/negotiator": {
"version": "0.6.3",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/npm/node_modules/node-gyp": {
"version": "9.1.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"env-paths": "^2.2.0",
"glob": "^7.1.4",
"graceful-fs": "^4.2.6",
"make-fetch-happen": "^10.0.3",
"nopt": "^5.0.0",
"npmlog": "^6.0.0",
"rimraf": "^3.0.2",
"semver": "^7.3.5",
"tar": "^6.1.2",
"which": "^2.0.2"
},
"bin": {
"node-gyp": "bin/node-gyp.js"
},
"engines": {
"node": "^12.22 || ^14.13 || >=16"
}
},
"node_modules/npm/node_modules/node-gyp/node_modules/brace-expansion": {
"version": "1.1.11",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"node_modules/npm/node_modules/node-gyp/node_modules/glob": {
"version": "7.2.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/npm/node_modules/node-gyp/node_modules/minimatch": {
"version": "3.1.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"brace-expansion": "^1.1.7"
},
"engines": {
"node": "*"
}
},
"node_modules/npm/node_modules/node-gyp/node_modules/nopt": {
"version": "5.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"abbrev": "1"
},
"bin": {
"nopt": "bin/nopt.js"
},
"engines": {
"node": ">=6"
}
},
"node_modules/npm/node_modules/nopt": {
"version": "6.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"abbrev": "^1.0.0"
},
"bin": {
"nopt": "bin/nopt.js"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/normalize-package-data": {
"version": "4.0.1",
"dev": true,
"inBundle": true,
"license": "BSD-2-Clause",
"dependencies": {
"hosted-git-info": "^5.0.0",
"is-core-module": "^2.8.1",
"semver": "^7.3.5",
"validate-npm-package-license": "^3.0.4"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-audit-report": {
"version": "3.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"chalk": "^4.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-bundled": {
"version": "2.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"npm-normalize-package-bin": "^2.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-bundled/node_modules/npm-normalize-package-bin": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-install-checks": {
"version": "5.0.0",
"dev": true,
"inBundle": true,
"license": "BSD-2-Clause",
"dependencies": {
"semver": "^7.1.1"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-normalize-package-bin": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/npm-package-arg": {
"version": "9.1.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"hosted-git-info": "^5.0.0",
"proc-log": "^2.0.1",
"semver": "^7.3.5",
"validate-npm-package-name": "^4.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-packlist": {
"version": "5.1.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"glob": "^8.0.1",
"ignore-walk": "^5.0.1",
"npm-bundled": "^2.0.0",
"npm-normalize-package-bin": "^2.0.0"
},
"bin": {
"npm-packlist": "bin/index.js"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-packlist/node_modules/npm-normalize-package-bin": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-pick-manifest": {
"version": "7.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"npm-install-checks": "^5.0.0",
"npm-normalize-package-bin": "^2.0.0",
"npm-package-arg": "^9.0.0",
"semver": "^7.3.5"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-pick-manifest/node_modules/npm-normalize-package-bin": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-profile": {
"version": "6.2.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"npm-registry-fetch": "^13.0.1",
"proc-log": "^2.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-registry-fetch": {
"version": "13.3.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"make-fetch-happen": "^10.0.6",
"minipass": "^3.1.6",
"minipass-fetch": "^2.0.3",
"minipass-json-stream": "^1.0.1",
"minizlib": "^2.1.2",
"npm-package-arg": "^9.0.1",
"proc-log": "^2.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/npm-user-validate": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "BSD-2-Clause"
},
"node_modules/npm/node_modules/npmlog": {
"version": "6.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"are-we-there-yet": "^3.0.0",
"console-control-strings": "^1.1.0",
"gauge": "^4.0.3",
"set-blocking": "^2.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/once": {
"version": "1.4.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/npm/node_modules/opener": {
"version": "1.5.2",
"dev": true,
"inBundle": true,
"license": "(WTFPL OR MIT)",
"bin": {
"opener": "bin/opener-bin.js"
}
},
"node_modules/npm/node_modules/p-map": {
"version": "4.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"aggregate-error": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/npm/node_modules/pacote": {
"version": "13.6.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"@npmcli/git": "^3.0.0",
"@npmcli/installed-package-contents": "^1.0.7",
"@npmcli/promise-spawn": "^3.0.0",
"@npmcli/run-script": "^4.1.0",
"cacache": "^16.0.0",
"chownr": "^2.0.0",
"fs-minipass": "^2.1.0",
"infer-owner": "^1.0.4",
"minipass": "^3.1.6",
"mkdirp": "^1.0.4",
"npm-package-arg": "^9.0.0",
"npm-packlist": "^5.1.0",
"npm-pick-manifest": "^7.0.0",
"npm-registry-fetch": "^13.0.1",
"proc-log": "^2.0.0",
"promise-retry": "^2.0.1",
"read-package-json": "^5.0.0",
"read-package-json-fast": "^2.0.3",
"rimraf": "^3.0.2",
"ssri": "^9.0.0",
"tar": "^6.1.11"
},
"bin": {
"pacote": "lib/bin.js"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/parse-conflict-json": {
"version": "2.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"json-parse-even-better-errors": "^2.3.1",
"just-diff": "^5.0.1",
"just-diff-apply": "^5.2.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/path-is-absolute": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/npm/node_modules/postcss-selector-parser": {
"version": "6.0.10",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"cssesc": "^3.0.0",
"util-deprecate": "^1.0.2"
},
"engines": {
"node": ">=4"
}
},
"node_modules/npm/node_modules/proc-log": {
"version": "2.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/promise-all-reject-late": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/npm/node_modules/promise-call-limit": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/npm/node_modules/promise-inflight": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/promise-retry": {
"version": "2.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"err-code": "^2.0.2",
"retry": "^0.12.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/promzard": {
"version": "0.3.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"read": "1"
}
},
"node_modules/npm/node_modules/qrcode-terminal": {
"version": "0.12.0",
"dev": true,
"inBundle": true,
"bin": {
"qrcode-terminal": "bin/qrcode-terminal.js"
}
},
"node_modules/npm/node_modules/read": {
"version": "1.0.7",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"mute-stream": "~0.0.4"
},
"engines": {
"node": ">=0.8"
}
},
"node_modules/npm/node_modules/read-cmd-shim": {
"version": "3.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/read-package-json": {
"version": "5.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"glob": "^8.0.1",
"json-parse-even-better-errors": "^2.3.1",
"normalize-package-data": "^4.0.0",
"npm-normalize-package-bin": "^2.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/read-package-json-fast": {
"version": "2.0.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"json-parse-even-better-errors": "^2.3.0",
"npm-normalize-package-bin": "^1.0.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/read-package-json/node_modules/npm-normalize-package-bin": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/readable-stream": {
"version": "3.6.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/npm/node_modules/readdir-scoped-modules": {
"version": "1.1.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"debuglog": "^1.0.1",
"dezalgo": "^1.0.0",
"graceful-fs": "^4.1.2",
"once": "^1.3.0"
}
},
"node_modules/npm/node_modules/retry": {
"version": "0.12.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">= 4"
}
},
"node_modules/npm/node_modules/rimraf": {
"version": "3.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"glob": "^7.1.3"
},
"bin": {
"rimraf": "bin.js"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/npm/node_modules/rimraf/node_modules/brace-expansion": {
"version": "1.1.11",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"node_modules/npm/node_modules/rimraf/node_modules/glob": {
"version": "7.2.3",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/npm/node_modules/rimraf/node_modules/minimatch": {
"version": "3.1.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"brace-expansion": "^1.1.7"
},
"engines": {
"node": "*"
}
},
"node_modules/npm/node_modules/safe-buffer": {
"version": "5.2.1",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/safer-buffer": {
"version": "2.1.2",
"dev": true,
"inBundle": true,
"license": "MIT",
"optional": true
},
"node_modules/npm/node_modules/semver": {
"version": "7.3.7",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"lru-cache": "^6.0.0"
},
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/semver/node_modules/lru-cache": {
"version": "6.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/npm/node_modules/set-blocking": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/signal-exit": {
"version": "3.0.7",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/smart-buffer": {
"version": "4.2.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"engines": {
"node": ">= 6.0.0",
"npm": ">= 3.0.0"
}
},
"node_modules/npm/node_modules/socks": {
"version": "2.7.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"ip": "^2.0.0",
"smart-buffer": "^4.2.0"
},
"engines": {
"node": ">= 10.13.0",
"npm": ">= 3.0.0"
}
},
"node_modules/npm/node_modules/socks-proxy-agent": {
"version": "7.0.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"agent-base": "^6.0.2",
"debug": "^4.3.3",
"socks": "^2.6.2"
},
"engines": {
"node": ">= 10"
}
},
"node_modules/npm/node_modules/spdx-correct": {
"version": "3.1.1",
"dev": true,
"inBundle": true,
"license": "Apache-2.0",
"dependencies": {
"spdx-expression-parse": "^3.0.0",
"spdx-license-ids": "^3.0.0"
}
},
"node_modules/npm/node_modules/spdx-exceptions": {
"version": "2.3.0",
"dev": true,
"inBundle": true,
"license": "CC-BY-3.0"
},
"node_modules/npm/node_modules/spdx-expression-parse": {
"version": "3.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"spdx-exceptions": "^2.1.0",
"spdx-license-ids": "^3.0.0"
}
},
"node_modules/npm/node_modules/spdx-license-ids": {
"version": "3.0.11",
"dev": true,
"inBundle": true,
"license": "CC0-1.0"
},
"node_modules/npm/node_modules/ssri": {
"version": "9.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"minipass": "^3.1.1"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/string_decoder": {
"version": "1.3.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"safe-buffer": "~5.2.0"
}
},
"node_modules/npm/node_modules/string-width": {
"version": "4.2.3",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/strip-ansi": {
"version": "6.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/supports-color": {
"version": "7.2.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/npm/node_modules/tar": {
"version": "6.1.11",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"chownr": "^2.0.0",
"fs-minipass": "^2.0.0",
"minipass": "^3.0.0",
"minizlib": "^2.1.1",
"mkdirp": "^1.0.3",
"yallist": "^4.0.0"
},
"engines": {
"node": ">= 10"
}
},
"node_modules/npm/node_modules/text-table": {
"version": "0.2.0",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/tiny-relative-date": {
"version": "1.3.0",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/treeverse": {
"version": "2.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/unique-filename": {
"version": "2.0.1",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"unique-slug": "^3.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/unique-slug": {
"version": "3.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"imurmurhash": "^0.1.4"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/util-deprecate": {
"version": "1.0.2",
"dev": true,
"inBundle": true,
"license": "MIT"
},
"node_modules/npm/node_modules/validate-npm-package-license": {
"version": "3.0.4",
"dev": true,
"inBundle": true,
"license": "Apache-2.0",
"dependencies": {
"spdx-correct": "^3.0.0",
"spdx-expression-parse": "^3.0.0"
}
},
"node_modules/npm/node_modules/validate-npm-package-name": {
"version": "4.0.0",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"builtins": "^5.0.0"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/walk-up-path": {
"version": "1.0.0",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/wcwidth": {
"version": "1.0.1",
"dev": true,
"inBundle": true,
"license": "MIT",
"dependencies": {
"defaults": "^1.0.3"
}
},
"node_modules/npm/node_modules/which": {
"version": "2.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"node-which": "bin/node-which"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/npm/node_modules/wide-align": {
"version": "1.1.5",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"string-width": "^1.0.2 || 2 || 3 || 4"
}
},
"node_modules/npm/node_modules/wrappy": {
"version": "1.0.2",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/npm/node_modules/write-file-atomic": {
"version": "4.0.2",
"dev": true,
"inBundle": true,
"license": "ISC",
"dependencies": {
"imurmurhash": "^0.1.4",
"signal-exit": "^3.0.7"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/npm/node_modules/yallist": {
"version": "4.0.0",
"dev": true,
"inBundle": true,
"license": "ISC"
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dev": true,
"dependencies": {
"wrappy": "1"
}
},
"node_modules/onetime": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
"integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
"dev": true,
"dependencies": {
"mimic-fn": "^2.1.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-each-series": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/p-each-series/-/p-each-series-3.0.0.tgz",
"integrity": "sha512-lastgtAdoH9YaLyDa5i5z64q+kzOcQHsQ5SsZJD3q0VEyI8mq872S3geuNbRUQLVAE9siMfgKrpj7MloKFHruw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-filter": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/p-filter/-/p-filter-2.1.0.tgz",
"integrity": "sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==",
"dev": true,
"dependencies": {
"p-map": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/p-is-promise": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-3.0.0.tgz",
"integrity": "sha512-Wo8VsW4IRQSKVXsJCn7TomUaVtyfjVDn3nUP7kE967BQk0CwFpdbZs0X0uk5sW9mkBa9eNM7hCMaG93WUAwxYQ==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/p-limit": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz",
"integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==",
"dev": true,
"dependencies": {
"yocto-queue": "^1.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-locate": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz",
"integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==",
"dev": true,
"dependencies": {
"p-limit": "^4.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-map": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz",
"integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/p-reduce": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-2.1.0.tgz",
"integrity": "sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/p-retry": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
"integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
"dev": true,
"dependencies": {
"@types/retry": "0.12.0",
"retry": "^0.13.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/p-try": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/parent-module": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
"integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
"dev": true,
"dependencies": {
"callsites": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/parse-json": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
"integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
"dev": true,
"dependencies": {
"@babel/code-frame": "^7.0.0",
"error-ex": "^1.3.1",
"json-parse-even-better-errors": "^2.3.0",
"lines-and-columns": "^1.1.6"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/path-exists": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz",
"integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==",
"dev": true,
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
}
},
"node_modules/path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
"dev": true
},
"node_modules/path-type": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
"integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"dev": true,
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/pify": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
"integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/pkg-conf": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-2.1.0.tgz",
"integrity": "sha512-C+VUP+8jis7EsQZIhDYmS5qlNtjv2yP4SNtjXK9AP1ZcTRlnSfuumaTnRfYZnYgUUYVIKqL0fRvmUGDV2fmp6g==",
"dev": true,
"dependencies": {
"find-up": "^2.0.0",
"load-json-file": "^4.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/pkg-conf/node_modules/find-up": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz",
"integrity": "sha512-NWzkk0jSJtTt08+FBFMvXoeZnOJD+jTtsRmBYbAIzJdX6l7dLgR7CTubCM5/eDdPUBvLCeVasP1brfVR/9/EZQ==",
"dev": true,
"dependencies": {
"locate-path": "^2.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/pkg-conf/node_modules/locate-path": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz",
"integrity": "sha512-NCI2kiDkyR7VeEKm27Kda/iQHyKJe1Bu0FlTbYp3CqJu+9IFe9bLyAjMxf5ZDDbEg+iMPzB5zYyUTSm8wVTKmA==",
"dev": true,
"dependencies": {
"p-locate": "^2.0.0",
"path-exists": "^3.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/pkg-conf/node_modules/p-limit": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz",
"integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==",
"dev": true,
"dependencies": {
"p-try": "^1.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/pkg-conf/node_modules/p-locate": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz",
"integrity": "sha512-nQja7m7gSKuewoVRen45CtVfODR3crN3goVQ0DDZ9N3yHxgpkuBhZqsaiotSQRrADUrne346peY7kT3TSACykg==",
"dev": true,
"dependencies": {
"p-limit": "^1.1.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/pkg-conf/node_modules/p-try": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz",
"integrity": "sha512-U1etNYuMJoIz3ZXSrrySFjsXQTWOx2/jdi86L+2pRvph/qMKL6sbcCYdH23fqsbm8TH2Gn0OybpT4eSFlCVHww==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/pkg-conf/node_modules/path-exists": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
"integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/process-nextick-args": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
"dev": true
},
"node_modules/proto-list": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz",
"integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==",
"dev": true
},
"node_modules/q": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz",
"integrity": "sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw==",
"dev": true,
"engines": {
"node": ">=0.6.0",
"teleport": ">=0.2.0"
}
},
"node_modules/queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
"integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/quick-lru": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz",
"integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/rc": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
"integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
"dev": true,
"dependencies": {
"deep-extend": "^0.6.0",
"ini": "~1.3.0",
"minimist": "^1.2.0",
"strip-json-comments": "~2.0.1"
},
"bin": {
"rc": "cli.js"
}
},
"node_modules/read-pkg": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz",
"integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==",
"dev": true,
"dependencies": {
"@types/normalize-package-data": "^2.4.0",
"normalize-package-data": "^2.5.0",
"parse-json": "^5.0.0",
"type-fest": "^0.6.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/read-pkg-up": {
"version": "9.1.0",
"resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-9.1.0.tgz",
"integrity": "sha512-vaMRR1AC1nrd5CQM0PhlRsO5oc2AAigqr7cCrZ/MW/Rsaflz4RlgzkpL4qoU/z1F6wrbd85iFv1OQj/y5RdGvg==",
"dev": true,
"dependencies": {
"find-up": "^6.3.0",
"read-pkg": "^7.1.0",
"type-fest": "^2.5.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/read-pkg-up/node_modules/read-pkg": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-7.1.0.tgz",
"integrity": "sha512-5iOehe+WF75IccPc30bWTbpdDQLOCc3Uu8bi3Dte3Eueij81yx1Mrufk8qBx/YAbR4uL1FdUr+7BKXDwEtisXg==",
"dev": true,
"dependencies": {
"@types/normalize-package-data": "^2.4.1",
"normalize-package-data": "^3.0.2",
"parse-json": "^5.2.0",
"type-fest": "^2.0.0"
},
"engines": {
"node": ">=12.20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/read-pkg-up/node_modules/type-fest": {
"version": "2.19.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz",
"integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==",
"dev": true,
"engines": {
"node": ">=12.20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/read-pkg/node_modules/hosted-git-info": {
"version": "2.8.9",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz",
"integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==",
"dev": true
},
"node_modules/read-pkg/node_modules/normalize-package-data": {
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz",
"integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==",
"dev": true,
"dependencies": {
"hosted-git-info": "^2.1.4",
"resolve": "^1.10.0",
"semver": "2 || 3 || 4 || 5",
"validate-npm-package-license": "^3.0.1"
}
},
"node_modules/read-pkg/node_modules/semver": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
"integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
"dev": true,
"bin": {
"semver": "bin/semver"
}
},
"node_modules/read-pkg/node_modules/type-fest": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz",
"integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/readable-stream": {
"version": "2.3.7",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
"integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
"dev": true,
"dependencies": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
},
"node_modules/redent": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz",
"integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==",
"dev": true,
"dependencies": {
"indent-string": "^4.0.0",
"strip-indent": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/redeyed": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/redeyed/-/redeyed-2.1.1.tgz",
"integrity": "sha512-FNpGGo1DycYAdnrKFxCMmKYgo/mILAqtRYbkdQD8Ep/Hk2PQ5+aEAEx+IU713RTDmuBaH0c8P5ZozurNu5ObRQ==",
"dev": true,
"dependencies": {
"esprima": "~4.0.0"
}
},
"node_modules/registry-auth-token": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.1.tgz",
"integrity": "sha512-UfxVOj8seK1yaIOiieV4FIP01vfBDLsY0H9sQzi9EbbUdJiuuBjJgLa1DpImXMNPnVkBD4eVxTEXcrZA6kfpJA==",
"dev": true,
"dependencies": {
"@pnpm/npm-conf": "^1.0.4"
},
"engines": {
"node": ">=14"
}
},
"node_modules/require-directory": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
"integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/resolve": {
"version": "1.22.1",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz",
"integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==",
"dev": true,
"dependencies": {
"is-core-module": "^2.9.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve-from": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
"integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/retry": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
"integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
"dev": true,
"engines": {
"node": ">= 4"
}
},
"node_modules/reusify": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
"integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
"dev": true,
"engines": {
"iojs": ">=1.0.0",
"node": ">=0.10.0"
}
},
"node_modules/rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dev": true,
"dependencies": {
"glob": "^7.1.3"
},
"bin": {
"rimraf": "bin.js"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
"integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"queue-microtask": "^1.2.2"
}
},
"node_modules/safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
"dev": true
},
"node_modules/semantic-release": {
"version": "20.1.0",
"resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-20.1.0.tgz",
"integrity": "sha512-+9+n6RIr0Fz0F53cXrjpawxWlUg3O7/qr1jF9lrE+/v6WqwBrSWnavVHTPaf2WLerET2EngoqI0M4pahkKl6XQ==",
"dev": true,
"dependencies": {
"@semantic-release/commit-analyzer": "^9.0.2",
"@semantic-release/error": "^3.0.0",
"@semantic-release/github": "^8.0.0",
"@semantic-release/npm": "^9.0.0",
"@semantic-release/release-notes-generator": "^10.0.0",
"aggregate-error": "^4.0.1",
"cosmiconfig": "^8.0.0",
"debug": "^4.0.0",
"env-ci": "^8.0.0",
"execa": "^6.1.0",
"figures": "^5.0.0",
"find-versions": "^5.1.0",
"get-stream": "^6.0.0",
"git-log-parser": "^1.2.0",
"hook-std": "^3.0.0",
"hosted-git-info": "^6.0.0",
"lodash-es": "^4.17.21",
"marked": "^4.1.0",
"marked-terminal": "^5.1.1",
"micromatch": "^4.0.2",
"p-each-series": "^3.0.0",
"p-reduce": "^3.0.0",
"read-pkg-up": "^9.1.0",
"resolve-from": "^5.0.0",
"semver": "^7.3.2",
"semver-diff": "^4.0.0",
"signale": "^1.2.1",
"yargs": "^17.5.1"
},
"bin": {
"semantic-release": "bin/semantic-release.js"
},
"engines": {
"node": ">=18"
}
},
"node_modules/semantic-release/node_modules/aggregate-error": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-4.0.1.tgz",
"integrity": "sha512-0poP0T7el6Vq3rstR8Mn4V/IQrpBLO6POkUSrN7RhyY+GF/InCFShQzsQ39T25gkHhLgSLByyAz+Kjb+c2L98w==",
"dev": true,
"dependencies": {
"clean-stack": "^4.0.0",
"indent-string": "^5.0.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/clean-stack": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-4.2.0.tgz",
"integrity": "sha512-LYv6XPxoyODi36Dp976riBtSY27VmFo+MKqEU9QCCWyTrdEPDog+RWA7xQWHi6Vbp61j5c4cdzzX1NidnwtUWg==",
"dev": true,
"dependencies": {
"escape-string-regexp": "5.0.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/escape-string-regexp": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
"integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/execa": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/execa/-/execa-6.1.0.tgz",
"integrity": "sha512-QVWlX2e50heYJcCPG0iWtf8r0xjEYfz/OYLGDYH+IyjWezzPNxz63qNFOu0l4YftGWuizFVZHHs8PrLU5p2IDA==",
"dev": true,
"dependencies": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.1",
"human-signals": "^3.0.1",
"is-stream": "^3.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^5.1.0",
"onetime": "^6.0.0",
"signal-exit": "^3.0.7",
"strip-final-newline": "^3.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sindresorhus/execa?sponsor=1"
}
},
"node_modules/semantic-release/node_modules/human-signals": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-3.0.1.tgz",
"integrity": "sha512-rQLskxnM/5OCldHo+wNXbpVgDn5A17CUoKX+7Sokwaknlq7CdSnphy0W39GU8dw59XiCXmFXDg4fRuckQRKewQ==",
"dev": true,
"engines": {
"node": ">=12.20.0"
}
},
"node_modules/semantic-release/node_modules/indent-string": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz",
"integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/is-stream": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
"integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
"dev": true,
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/mimic-fn": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
"integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/npm-run-path": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz",
"integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==",
"dev": true,
"dependencies": {
"path-key": "^4.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/onetime": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
"integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
"dev": true,
"dependencies": {
"mimic-fn": "^4.0.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/p-reduce": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-3.0.0.tgz",
"integrity": "sha512-xsrIUgI0Kn6iyDYm9StOpOeK29XM1aboGji26+QEortiFST1hGZaUQOLhtEbqHErPpGW/aSz6allwK2qcptp0Q==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/path-key": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
"integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semantic-release/node_modules/strip-final-newline": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
"integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semver": {
"version": "7.3.8",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz",
"integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
},
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/semver-diff": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz",
"integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==",
"dev": true,
"dependencies": {
"semver": "^7.3.5"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semver-regex": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-4.0.5.tgz",
"integrity": "sha512-hunMQrEy1T6Jr2uEVjrAIqjwWcQTgOAcIM52C8MY1EZSD3DDNft04XzvYKPqjED65bNVVko0YI38nYeEHCX3yw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semver/node_modules/lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dev": true,
"dependencies": {
"shebang-regex": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/signal-exit": {
"version": "3.0.7",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
"dev": true
},
"node_modules/signale": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/signale/-/signale-1.4.0.tgz",
"integrity": "sha512-iuh+gPf28RkltuJC7W5MRi6XAjTDCAPC/prJUpQoG4vIP3MJZ+GTydVnodXA7pwvTKb2cA0m9OFZW/cdWy/I/w==",
"dev": true,
"dependencies": {
"chalk": "^2.3.2",
"figures": "^2.0.0",
"pkg-conf": "^2.1.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/signale/node_modules/figures": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz",
"integrity": "sha512-Oa2M9atig69ZkfwiApY8F2Yy+tzMbazyvqv21R0NsSC8floSOC09BbT1ITWAdoMGQvJ/aZnR1KMwdx9tvHnTNA==",
"dev": true,
"dependencies": {
"escape-string-regexp": "^1.0.5"
},
"engines": {
"node": ">=4"
}
},
"node_modules/slash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
"integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/spawn-error-forwarder": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/spawn-error-forwarder/-/spawn-error-forwarder-1.0.0.tgz",
"integrity": "sha512-gRjMgK5uFjbCvdibeGJuy3I5OYz6VLoVdsOJdA6wV0WlfQVLFueoqMxwwYD9RODdgb6oUIvlRlsyFSiQkMKu0g==",
"dev": true
},
"node_modules/spdx-correct": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz",
"integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==",
"dev": true,
"dependencies": {
"spdx-expression-parse": "^3.0.0",
"spdx-license-ids": "^3.0.0"
}
},
"node_modules/spdx-exceptions": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz",
"integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==",
"dev": true
},
"node_modules/spdx-expression-parse": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz",
"integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==",
"dev": true,
"dependencies": {
"spdx-exceptions": "^2.1.0",
"spdx-license-ids": "^3.0.0"
}
},
"node_modules/spdx-license-ids": {
"version": "3.0.12",
"resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.12.tgz",
"integrity": "sha512-rr+VVSXtRhO4OHbXUiAF7xW3Bo9DuuF6C5jH+q/x15j2jniycgKbxU09Hr0WqlSLUs4i4ltHGXqTe7VHclYWyA==",
"dev": true
},
"node_modules/split": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz",
"integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==",
"dev": true,
"dependencies": {
"through": "2"
},
"engines": {
"node": "*"
}
},
"node_modules/split2": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/split2/-/split2-3.2.2.tgz",
"integrity": "sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==",
"dev": true,
"dependencies": {
"readable-stream": "^3.0.0"
}
},
"node_modules/split2/node_modules/readable-stream": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
"integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
"dev": true,
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/stream-combiner2": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/stream-combiner2/-/stream-combiner2-1.1.1.tgz",
"integrity": "sha512-3PnJbYgS56AeWgtKF5jtJRT6uFJe56Z0Hc5Ngg/6sI6rIt8iiMBTa9cvdyFfpMQjaVHr8dusbNeFGIIonxOvKw==",
"dev": true,
"dependencies": {
"duplexer2": "~0.1.0",
"readable-stream": "^2.0.2"
}
},
"node_modules/string_decoder": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"dev": true,
"dependencies": {
"safe-buffer": "~5.1.0"
}
},
"node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-bom": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
"integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/strip-final-newline": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
"integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/strip-indent": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz",
"integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==",
"dev": true,
"dependencies": {
"min-indent": "^1.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-json-comments": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
"integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dev": true,
"dependencies": {
"has-flag": "^3.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/supports-hyperlinks": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz",
"integrity": "sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==",
"dev": true,
"dependencies": {
"has-flag": "^4.0.0",
"supports-color": "^7.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/supports-hyperlinks/node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/supports-hyperlinks/node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/supports-preserve-symlinks-flag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
"integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/temp-dir": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-2.0.0.tgz",
"integrity": "sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/tempy": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/tempy/-/tempy-1.0.1.tgz",
"integrity": "sha512-biM9brNqxSc04Ee71hzFbryD11nX7VPhQQY32AdDmjFvodsRFz/3ufeoTZ6uYkRFfGo188tENcASNs3vTdsM0w==",
"dev": true,
"dependencies": {
"del": "^6.0.0",
"is-stream": "^2.0.0",
"temp-dir": "^2.0.0",
"type-fest": "^0.16.0",
"unique-string": "^2.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/tempy/node_modules/type-fest": {
"version": "0.16.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.16.0.tgz",
"integrity": "sha512-eaBzG6MxNzEn9kiwvtre90cXaNLkmadMWa1zQMs3XORCXNbsH/OewwbxC5ia9dCxIxnTAsSxXJaa/p5y8DlvJg==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/text-extensions": {
"version": "1.9.0",
"resolved": "https://registry.npmjs.org/text-extensions/-/text-extensions-1.9.0.tgz",
"integrity": "sha512-wiBrwC1EhBelW12Zy26JeOUkQ5mRu+5o8rpsJk5+2t+Y5vE7e842qtZDQ2g1NpX/29HdyFeJ4nSIhI47ENSxlQ==",
"dev": true,
"engines": {
"node": ">=0.10"
}
},
"node_modules/through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
"integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==",
"dev": true
},
"node_modules/through2": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/through2/-/through2-4.0.2.tgz",
"integrity": "sha512-iOqSav00cVxEEICeD7TjLB1sueEL+81Wpzp2bY17uZjZN0pWZPuo4suZ/61VujxmqSGFfgOcNuTZ85QJwNZQpw==",
"dev": true,
"dependencies": {
"readable-stream": "3"
}
},
"node_modules/through2/node_modules/readable-stream": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
"integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
"dev": true,
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"dependencies": {
"is-number": "^7.0.0"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"dev": true
},
"node_modules/traverse": {
"version": "0.6.7",
"resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.7.tgz",
"integrity": "sha512-/y956gpUo9ZNCb99YjxG7OaslxZWHfCHAUUfshwqOXmxUIvqLjVO581BT+gM59+QV9tFe6/CGG53tsA1Y7RSdg==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/trim-newlines": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz",
"integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/type-fest": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz",
"integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/uglify-js": {
"version": "3.17.4",
"resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.17.4.tgz",
"integrity": "sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==",
"dev": true,
"optional": true,
"bin": {
"uglifyjs": "bin/uglifyjs"
},
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/unique-string": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz",
"integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==",
"dev": true,
"dependencies": {
"crypto-random-string": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/universal-user-agent": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz",
"integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==",
"dev": true
},
"node_modules/universalify": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
"integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==",
"dev": true,
"engines": {
"node": ">= 10.0.0"
}
},
"node_modules/url-join": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
"integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==",
"dev": true
},
"node_modules/util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
"dev": true
},
"node_modules/validate-npm-package-license": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz",
"integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==",
"dev": true,
"dependencies": {
"spdx-correct": "^3.0.0",
"spdx-expression-parse": "^3.0.0"
}
},
"node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"dev": true
},
"node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dev": true,
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dev": true,
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"node-which": "bin/node-which"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/wordwrap": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
"integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==",
"dev": true
},
"node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrap-ansi/node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/wrap-ansi/node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/wrap-ansi/node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"dev": true
},
"node_modules/xtend": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
"dev": true,
"engines": {
"node": ">=0.4"
}
},
"node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
"integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
"dev": true,
"engines": {
"node": ">=10"
}
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"dev": true
},
"node_modules/yargs": {
"version": "17.7.1",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz",
"integrity": "sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==",
"dev": true,
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/yargs-parser": {
"version": "20.2.9",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
"integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
"dev": true,
"engines": {
"node": ">=10"
}
},
"node_modules/yargs/node_modules/yargs-parser": {
"version": "21.1.1",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
"integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
"dev": true,
"engines": {
"node": ">=12"
}
},
"node_modules/yocto-queue": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz",
"integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==",
"dev": true,
"engines": {
"node": ">=12.20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
}
},
"dependencies": {
"@babel/code-frame": {
"version": "7.18.6",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz",
"integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==",
"dev": true,
"requires": {
"@babel/highlight": "^7.18.6"
}
},
"@babel/helper-validator-identifier": {
"version": "7.19.1",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz",
"integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==",
"dev": true
},
"@babel/highlight": {
"version": "7.18.6",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz",
"integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==",
"dev": true,
"requires": {
"@babel/helper-validator-identifier": "^7.18.6",
"chalk": "^2.0.0",
"js-tokens": "^4.0.0"
}
},
"@colors/colors": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz",
"integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==",
"dev": true,
"optional": true
},
"@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
"integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
"dev": true,
"requires": {
"@nodelib/fs.stat": "2.0.5",
"run-parallel": "^1.1.9"
}
},
"@nodelib/fs.stat": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
"integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
"dev": true
},
"@nodelib/fs.walk": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
"integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
"dev": true,
"requires": {
"@nodelib/fs.scandir": "2.1.5",
"fastq": "^1.6.0"
}
},
"@octokit/auth-token": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-3.0.3.tgz",
"integrity": "sha512-/aFM2M4HVDBT/jjDBa84sJniv1t9Gm/rLkalaz9htOm+L+8JMj1k9w0CkUdcxNyNxZPlTxKPVko+m1VlM58ZVA==",
"dev": true,
"requires": {
"@octokit/types": "^9.0.0"
}
},
"@octokit/core": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.0.tgz",
"integrity": "sha512-AgvDRUg3COpR82P7PBdGZF/NNqGmtMq2NiPqeSsDIeCfYFOZ9gddqWNQHnFdEUf+YwOj4aZYmJnlPp7OXmDIDg==",
"dev": true,
"requires": {
"@octokit/auth-token": "^3.0.0",
"@octokit/graphql": "^5.0.0",
"@octokit/request": "^6.0.0",
"@octokit/request-error": "^3.0.0",
"@octokit/types": "^9.0.0",
"before-after-hook": "^2.2.0",
"universal-user-agent": "^6.0.0"
}
},
"@octokit/endpoint": {
"version": "7.0.5",
"resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-7.0.5.tgz",
"integrity": "sha512-LG4o4HMY1Xoaec87IqQ41TQ+glvIeTKqfjkCEmt5AIwDZJwQeVZFIEYXrYY6yLwK+pAScb9Gj4q+Nz2qSw1roA==",
"dev": true,
"requires": {
"@octokit/types": "^9.0.0",
"is-plain-object": "^5.0.0",
"universal-user-agent": "^6.0.0"
}
},
"@octokit/graphql": {
"version": "5.0.5",
"resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-5.0.5.tgz",
"integrity": "sha512-Qwfvh3xdqKtIznjX9lz2D458r7dJPP8l6r4GQkIdWQouZwHQK0mVT88uwiU2bdTU2OtT1uOlKpRciUWldpG0yQ==",
"dev": true,
"requires": {
"@octokit/request": "^6.0.0",
"@octokit/types": "^9.0.0",
"universal-user-agent": "^6.0.0"
}
},
"@octokit/openapi-types": {
"version": "16.0.0",
"resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-16.0.0.tgz",
"integrity": "sha512-JbFWOqTJVLHZSUUoF4FzAZKYtqdxWu9Z5m2QQnOyEa04fOFljvyh7D3GYKbfuaSWisqehImiVIMG4eyJeP5VEA==",
"dev": true
},
"@octokit/plugin-paginate-rest": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-6.0.0.tgz",
"integrity": "sha512-Sq5VU1PfT6/JyuXPyt04KZNVsFOSBaYOAq2QRZUwzVlI10KFvcbUo8lR258AAQL1Et60b0WuVik+zOWKLuDZxw==",
"dev": true,
"requires": {
"@octokit/types": "^9.0.0"
}
},
"@octokit/plugin-request-log": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.4.tgz",
"integrity": "sha512-mLUsMkgP7K/cnFEw07kWqXGF5LKrOkD+lhCrKvPHXWDywAwuDUeDwWBpc69XK3pNX0uKiVt8g5z96PJ6z9xCFA==",
"dev": true,
"requires": {}
},
"@octokit/plugin-rest-endpoint-methods": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-7.0.1.tgz",
"integrity": "sha512-pnCaLwZBudK5xCdrR823xHGNgqOzRnJ/mpC/76YPpNP7DybdsJtP7mdOwh+wYZxK5jqeQuhu59ogMI4NRlBUvA==",
"dev": true,
"requires": {
"@octokit/types": "^9.0.0",
"deprecation": "^2.3.1"
}
},
"@octokit/request": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/@octokit/request/-/request-6.2.3.tgz",
"integrity": "sha512-TNAodj5yNzrrZ/VxP+H5HiYaZep0H3GU0O7PaF+fhDrt8FPrnkei9Aal/txsN/1P7V3CPiThG0tIvpPDYUsyAA==",
"dev": true,
"requires": {
"@octokit/endpoint": "^7.0.0",
"@octokit/request-error": "^3.0.0",
"@octokit/types": "^9.0.0",
"is-plain-object": "^5.0.0",
"node-fetch": "^2.6.7",
"universal-user-agent": "^6.0.0"
}
},
"@octokit/request-error": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-3.0.3.tgz",
"integrity": "sha512-crqw3V5Iy2uOU5Np+8M/YexTlT8zxCfI+qu+LxUB7SZpje4Qmx3mub5DfEKSO8Ylyk0aogi6TYdf6kxzh2BguQ==",
"dev": true,
"requires": {
"@octokit/types": "^9.0.0",
"deprecation": "^2.0.0",
"once": "^1.4.0"
}
},
"@octokit/rest": {
"version": "19.0.7",
"resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-19.0.7.tgz",
"integrity": "sha512-HRtSfjrWmWVNp2uAkEpQnuGMJsu/+dBr47dRc5QVgsCbnIc1+GFEaoKBWkYG+zjrsHpSqcAElMio+n10c0b5JA==",
"dev": true,
"requires": {
"@octokit/core": "^4.1.0",
"@octokit/plugin-paginate-rest": "^6.0.0",
"@octokit/plugin-request-log": "^1.0.4",
"@octokit/plugin-rest-endpoint-methods": "^7.0.0"
}
},
"@octokit/types": {
"version": "9.0.0",
"resolved": "https://registry.npmjs.org/@octokit/types/-/types-9.0.0.tgz",
"integrity": "sha512-LUewfj94xCMH2rbD5YJ+6AQ4AVjFYTgpp6rboWM5T7N3IsIF65SBEOVcYMGAEzO/kKNiNaW4LoWtoThOhH06gw==",
"dev": true,
"requires": {
"@octokit/openapi-types": "^16.0.0"
}
},
"@pnpm/network.ca-file": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz",
"integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==",
"dev": true,
"requires": {
"graceful-fs": "4.2.10"
}
},
"@pnpm/npm-conf": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-1.0.5.tgz",
"integrity": "sha512-hD8ml183638O3R6/Txrh0L8VzGOrFXgRtRDG4qQC4tONdZ5Z1M+tlUUDUvrjYdmK6G+JTBTeaCLMna11cXzi8A==",
"dev": true,
"requires": {
"@pnpm/network.ca-file": "^1.0.1",
"config-chain": "^1.1.11"
}
},
"@semantic-release/commit-analyzer": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-9.0.2.tgz",
"integrity": "sha512-E+dr6L+xIHZkX4zNMe6Rnwg4YQrWNXK+rNsvwOPpdFppvZO1olE2fIgWhv89TkQErygevbjsZFSIxp+u6w2e5g==",
"dev": true,
"requires": {
"conventional-changelog-angular": "^5.0.0",
"conventional-commits-filter": "^2.0.0",
"conventional-commits-parser": "^3.2.3",
"debug": "^4.0.0",
"import-from": "^4.0.0",
"lodash": "^4.17.4",
"micromatch": "^4.0.2"
}
},
"@semantic-release/error": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-3.0.0.tgz",
"integrity": "sha512-5hiM4Un+tpl4cKw3lV4UgzJj+SmfNIDCLLw0TepzQxz9ZGV5ixnqkzIVF+3tp0ZHgcMKE+VNGHJjEeyFG2dcSw==",
"dev": true
},
"@semantic-release/exec": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/@semantic-release/exec/-/exec-6.0.3.tgz",
"integrity": "sha512-bxAq8vLOw76aV89vxxICecEa8jfaWwYITw6X74zzlO0mc/Bgieqx9kBRz9z96pHectiTAtsCwsQcUyLYWnp3VQ==",
"dev": true,
"requires": {
"@semantic-release/error": "^3.0.0",
"aggregate-error": "^3.0.0",
"debug": "^4.0.0",
"execa": "^5.0.0",
"lodash": "^4.17.4",
"parse-json": "^5.0.0"
}
},
"@semantic-release/git": {
"version": "10.0.1",
"resolved": "https://registry.npmjs.org/@semantic-release/git/-/git-10.0.1.tgz",
"integrity": "sha512-eWrx5KguUcU2wUPaO6sfvZI0wPafUKAMNC18aXY4EnNcrZL86dEmpNVnC9uMpGZkmZJ9EfCVJBQx4pV4EMGT1w==",
"dev": true,
"requires": {
"@semantic-release/error": "^3.0.0",
"aggregate-error": "^3.0.0",
"debug": "^4.0.0",
"dir-glob": "^3.0.0",
"execa": "^5.0.0",
"lodash": "^4.17.4",
"micromatch": "^4.0.0",
"p-reduce": "^2.0.0"
}
},
"@semantic-release/github": {
"version": "8.0.7",
"resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-8.0.7.tgz",
"integrity": "sha512-VtgicRIKGvmTHwm//iqTh/5NGQwsncOMR5vQK9pMT92Aem7dv37JFKKRuulUsAnUOIlO4G8wH3gPiBAA0iW0ww==",
"dev": true,
"requires": {
"@octokit/rest": "^19.0.0",
"@semantic-release/error": "^3.0.0",
"aggregate-error": "^3.0.0",
"bottleneck": "^2.18.1",
"debug": "^4.0.0",
"dir-glob": "^3.0.0",
"fs-extra": "^11.0.0",
"globby": "^11.0.0",
"http-proxy-agent": "^5.0.0",
"https-proxy-agent": "^5.0.0",
"issue-parser": "^6.0.0",
"lodash": "^4.17.4",
"mime": "^3.0.0",
"p-filter": "^2.0.0",
"p-retry": "^4.0.0",
"url-join": "^4.0.0"
}
},
"@semantic-release/npm": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-9.0.2.tgz",
"integrity": "sha512-zgsynF6McdzxPnFet+a4iO9HpAlARXOM5adz7VGVCvj0ne8wtL2ZOQoDV2wZPDmdEotDIbVeJjafhelZjs9j6g==",
"dev": true,
"requires": {
"@semantic-release/error": "^3.0.0",
"aggregate-error": "^3.0.0",
"execa": "^5.0.0",
"fs-extra": "^11.0.0",
"lodash": "^4.17.15",
"nerf-dart": "^1.0.0",
"normalize-url": "^6.0.0",
"npm": "^8.3.0",
"rc": "^1.2.8",
"read-pkg": "^5.0.0",
"registry-auth-token": "^5.0.0",
"semver": "^7.1.2",
"tempy": "^1.0.0"
}
},
"@semantic-release/release-notes-generator": {
"version": "10.0.3",
"resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-10.0.3.tgz",
"integrity": "sha512-k4x4VhIKneOWoBGHkx0qZogNjCldLPRiAjnIpMnlUh6PtaWXp/T+C9U7/TaNDDtgDa5HMbHl4WlREdxHio6/3w==",
"dev": true,
"requires": {
"conventional-changelog-angular": "^5.0.0",
"conventional-changelog-writer": "^5.0.0",
"conventional-commits-filter": "^2.0.0",
"conventional-commits-parser": "^3.2.3",
"debug": "^4.0.0",
"get-stream": "^6.0.0",
"import-from": "^4.0.0",
"into-stream": "^6.0.0",
"lodash": "^4.17.4",
"read-pkg-up": "^7.0.0"
},
"dependencies": {
"find-up": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dev": true,
"requires": {
"locate-path": "^5.0.0",
"path-exists": "^4.0.0"
}
},
"locate-path": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dev": true,
"requires": {
"p-locate": "^4.1.0"
}
},
"p-limit": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dev": true,
"requires": {
"p-try": "^2.0.0"
}
},
"p-locate": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dev": true,
"requires": {
"p-limit": "^2.2.0"
}
},
"path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dev": true
},
"read-pkg-up": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
"integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
"dev": true,
"requires": {
"find-up": "^4.1.0",
"read-pkg": "^5.2.0",
"type-fest": "^0.8.1"
}
},
"type-fest": {
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
"integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
"dev": true
}
}
},
"@tootallnate/once": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz",
"integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==",
"dev": true
},
"@types/minimist": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz",
"integrity": "sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==",
"dev": true
},
"@types/normalize-package-data": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz",
"integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==",
"dev": true
},
"@types/retry": {
"version": "0.12.0",
"resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
"integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==",
"dev": true
},
"agent-base": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
"integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
"dev": true,
"requires": {
"debug": "4"
}
},
"aggregate-error": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz",
"integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==",
"dev": true,
"requires": {
"clean-stack": "^2.0.0",
"indent-string": "^4.0.0"
}
},
"ansi-escapes": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-5.0.0.tgz",
"integrity": "sha512-5GFMVX8HqE/TB+FuBJGuO5XG0WrsA6ptUqoODaT/n9mmUaZFkqnBueB4leqGBCmrUHnCnC4PCZTCd0E7QQ83bA==",
"dev": true,
"requires": {
"type-fest": "^1.0.2"
}
},
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dev": true,
"requires": {
"color-convert": "^1.9.0"
}
},
"ansicolors": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz",
"integrity": "sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg==",
"dev": true
},
"argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"dev": true
},
"argv-formatter": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/argv-formatter/-/argv-formatter-1.0.0.tgz",
"integrity": "sha512-F2+Hkm9xFaRg+GkaNnbwXNDV5O6pnCFEmqyhvfC/Ic5LbgOWjJh3L+mN/s91rxVL3znE7DYVpW0GJFT+4YBgWw==",
"dev": true
},
"array-ify": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/array-ify/-/array-ify-1.0.0.tgz",
"integrity": "sha512-c5AMf34bKdvPhQ7tBGhqkgKNUzMr4WUs+WDtC2ZUGOUncbxKMTvqxYctiseW3+L4bA8ec+GcZ6/A/FW4m8ukng==",
"dev": true
},
"array-union": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
"integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
"dev": true
},
"arrify": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
"integrity": "sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==",
"dev": true
},
"balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true
},
"before-after-hook": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz",
"integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==",
"dev": true
},
"bottleneck": {
"version": "2.19.5",
"resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz",
"integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==",
"dev": true
},
"brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"dev": true,
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"braces": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
"integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
"dev": true,
"requires": {
"fill-range": "^7.0.1"
}
},
"callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"dev": true
},
"camelcase": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
"dev": true
},
"camelcase-keys": {
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz",
"integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==",
"dev": true,
"requires": {
"camelcase": "^5.3.1",
"map-obj": "^4.0.0",
"quick-lru": "^4.0.1"
}
},
"cardinal": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/cardinal/-/cardinal-2.1.1.tgz",
"integrity": "sha512-JSr5eOgoEymtYHBjNWyjrMqet9Am2miJhlfKNdqLp6zoeAh0KN5dRAcxlecj5mAJrmQomgiOBj35xHLrFjqBpw==",
"dev": true,
"requires": {
"ansicolors": "~0.3.2",
"redeyed": "~2.1.0"
}
},
"chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dev": true,
"requires": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
}
},
"clean-stack": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
"integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==",
"dev": true
},
"cli-table3": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz",
"integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==",
"dev": true,
"requires": {
"@colors/colors": "1.5.0",
"string-width": "^4.2.0"
}
},
"cliui": {
"version": "8.0.1",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
"integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
"dev": true,
"requires": {
"string-width": "^4.2.0",
"strip-ansi": "^6.0.1",
"wrap-ansi": "^7.0.0"
}
},
"color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dev": true,
"requires": {
"color-name": "1.1.3"
}
},
"color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
"dev": true
},
"compare-func": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/compare-func/-/compare-func-2.0.0.tgz",
"integrity": "sha512-zHig5N+tPWARooBnb0Zx1MFcdfpyJrfTJ3Y5L+IFvUm8rM74hHz66z0gw0x4tijh5CorKkKUCnW82R2vmpeCRA==",
"dev": true,
"requires": {
"array-ify": "^1.0.0",
"dot-prop": "^5.1.0"
}
},
"concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
"dev": true
},
"config-chain": {
"version": "1.1.13",
"resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz",
"integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==",
"dev": true,
"requires": {
"ini": "^1.3.4",
"proto-list": "~1.2.1"
}
},
"conventional-changelog-angular": {
"version": "5.0.13",
"resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-5.0.13.tgz",
"integrity": "sha512-i/gipMxs7s8L/QeuavPF2hLnJgH6pEZAttySB6aiQLWcX3puWDL3ACVmvBhJGxnAy52Qc15ua26BufY6KpmrVA==",
"dev": true,
"requires": {
"compare-func": "^2.0.0",
"q": "^1.5.1"
}
},
"conventional-changelog-writer": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/conventional-changelog-writer/-/conventional-changelog-writer-5.0.1.tgz",
"integrity": "sha512-5WsuKUfxW7suLblAbFnxAcrvf6r+0b7GvNaWUwUIk0bXMnENP/PEieGKVUQrjPqwPT4o3EPAASBXiY6iHooLOQ==",
"dev": true,
"requires": {
"conventional-commits-filter": "^2.0.7",
"dateformat": "^3.0.0",
"handlebars": "^4.7.7",
"json-stringify-safe": "^5.0.1",
"lodash": "^4.17.15",
"meow": "^8.0.0",
"semver": "^6.0.0",
"split": "^1.0.0",
"through2": "^4.0.0"
},
"dependencies": {
"semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"dev": true
}
}
},
"conventional-commits-filter": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/conventional-commits-filter/-/conventional-commits-filter-2.0.7.tgz",
"integrity": "sha512-ASS9SamOP4TbCClsRHxIHXRfcGCnIoQqkvAzCSbZzTFLfcTqJVugB0agRgsEELsqaeWgsXv513eS116wnlSSPA==",
"dev": true,
"requires": {
"lodash.ismatch": "^4.4.0",
"modify-values": "^1.0.0"
}
},
"conventional-commits-parser": {
"version": "3.2.4",
"resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-3.2.4.tgz",
"integrity": "sha512-nK7sAtfi+QXbxHCYfhpZsfRtaitZLIA6889kFIouLvz6repszQDgxBu7wf2WbU+Dco7sAnNCJYERCwt54WPC2Q==",
"dev": true,
"requires": {
"is-text-path": "^1.0.1",
"JSONStream": "^1.0.4",
"lodash": "^4.17.15",
"meow": "^8.0.0",
"split2": "^3.0.0",
"through2": "^4.0.0"
}
},
"core-util-is": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
"integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==",
"dev": true
},
"cosmiconfig": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.0.0.tgz",
"integrity": "sha512-da1EafcpH6b/TD8vDRaWV7xFINlHlF6zKsGwS1TsuVJTZRkquaS5HTMq7uq6h31619QjbsYl21gVDOm32KM1vQ==",
"dev": true,
"requires": {
"import-fresh": "^3.2.1",
"js-yaml": "^4.1.0",
"parse-json": "^5.0.0",
"path-type": "^4.0.0"
}
},
"cross-spawn": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
"integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
"dev": true,
"requires": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
}
},
"crypto-random-string": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz",
"integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==",
"dev": true
},
"dateformat": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/dateformat/-/dateformat-3.0.3.tgz",
"integrity": "sha512-jyCETtSl3VMZMWeRo7iY1FL19ges1t55hMo5yaam4Jrsm5EPL89UQkoQRyiI+Yf4k8r2ZpdngkV8hr1lIdjb3Q==",
"dev": true
},
"debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"decamelize": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
"integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==",
"dev": true
},
"decamelize-keys": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.1.tgz",
"integrity": "sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==",
"dev": true,
"requires": {
"decamelize": "^1.1.0",
"map-obj": "^1.0.0"
},
"dependencies": {
"map-obj": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz",
"integrity": "sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==",
"dev": true
}
}
},
"deep-extend": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
"integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
"dev": true
},
"del": {
"version": "6.1.1",
"resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz",
"integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==",
"dev": true,
"requires": {
"globby": "^11.0.1",
"graceful-fs": "^4.2.4",
"is-glob": "^4.0.1",
"is-path-cwd": "^2.2.0",
"is-path-inside": "^3.0.2",
"p-map": "^4.0.0",
"rimraf": "^3.0.2",
"slash": "^3.0.0"
},
"dependencies": {
"p-map": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz",
"integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==",
"dev": true,
"requires": {
"aggregate-error": "^3.0.0"
}
}
}
},
"deprecation": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz",
"integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==",
"dev": true
},
"dir-glob": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
"integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
"dev": true,
"requires": {
"path-type": "^4.0.0"
}
},
"dot-prop": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz",
"integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==",
"dev": true,
"requires": {
"is-obj": "^2.0.0"
}
},
"duplexer2": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz",
"integrity": "sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA==",
"dev": true,
"requires": {
"readable-stream": "^2.0.2"
}
},
"emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"env-ci": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/env-ci/-/env-ci-8.0.0.tgz",
"integrity": "sha512-W+3BqGZozFua9MPeXpmTm5eYEBtGgL76jGu/pwMVp/L8PdECSCEWaIp7d4Mw7kuUrbUldK0oV0bNd6ZZjLiMiA==",
"dev": true,
"requires": {
"execa": "^6.1.0",
"java-properties": "^1.0.2"
},
"dependencies": {
"execa": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/execa/-/execa-6.1.0.tgz",
"integrity": "sha512-QVWlX2e50heYJcCPG0iWtf8r0xjEYfz/OYLGDYH+IyjWezzPNxz63qNFOu0l4YftGWuizFVZHHs8PrLU5p2IDA==",
"dev": true,
"requires": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.1",
"human-signals": "^3.0.1",
"is-stream": "^3.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^5.1.0",
"onetime": "^6.0.0",
"signal-exit": "^3.0.7",
"strip-final-newline": "^3.0.0"
}
},
"human-signals": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-3.0.1.tgz",
"integrity": "sha512-rQLskxnM/5OCldHo+wNXbpVgDn5A17CUoKX+7Sokwaknlq7CdSnphy0W39GU8dw59XiCXmFXDg4fRuckQRKewQ==",
"dev": true
},
"is-stream": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
"integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
"dev": true
},
"mimic-fn": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
"integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
"dev": true
},
"npm-run-path": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz",
"integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==",
"dev": true,
"requires": {
"path-key": "^4.0.0"
}
},
"onetime": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
"integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
"dev": true,
"requires": {
"mimic-fn": "^4.0.0"
}
},
"path-key": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
"integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
"dev": true
},
"strip-final-newline": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
"integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
"dev": true
}
}
},
"error-ex": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
"integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
"dev": true,
"requires": {
"is-arrayish": "^0.2.1"
}
},
"escalade": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
"integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
"dev": true
},
"escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
"dev": true
},
"esprima": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
"integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
"dev": true
},
"execa": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
"integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
"dev": true,
"requires": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.0",
"human-signals": "^2.1.0",
"is-stream": "^2.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^4.0.1",
"onetime": "^5.1.2",
"signal-exit": "^3.0.3",
"strip-final-newline": "^2.0.0"
}
},
"fast-glob": {
"version": "3.2.12",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
"integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
"dev": true,
"requires": {
"@nodelib/fs.stat": "^2.0.2",
"@nodelib/fs.walk": "^1.2.3",
"glob-parent": "^5.1.2",
"merge2": "^1.3.0",
"micromatch": "^4.0.4"
}
},
"fastq": {
"version": "1.15.0",
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
"integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==",
"dev": true,
"requires": {
"reusify": "^1.0.4"
}
},
"figures": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/figures/-/figures-5.0.0.tgz",
"integrity": "sha512-ej8ksPF4x6e5wvK9yevct0UCXh8TTFlWGVLlgjZuoBH1HwjIfKE/IdL5mq89sFA7zELi1VhKpmtDnrs7zWyeyg==",
"dev": true,
"requires": {
"escape-string-regexp": "^5.0.0",
"is-unicode-supported": "^1.2.0"
},
"dependencies": {
"escape-string-regexp": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
"integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==",
"dev": true
}
}
},
"fill-range": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
"dev": true,
"requires": {
"to-regex-range": "^5.0.1"
}
},
"find-up": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz",
"integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==",
"dev": true,
"requires": {
"locate-path": "^7.1.0",
"path-exists": "^5.0.0"
}
},
"find-versions": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/find-versions/-/find-versions-5.1.0.tgz",
"integrity": "sha512-+iwzCJ7C5v5KgcBuueqVoNiHVoQpwiUK5XFLjf0affFTep+Wcw93tPvmb8tqujDNmzhBDPddnWV/qgWSXgq+Hg==",
"dev": true,
"requires": {
"semver-regex": "^4.0.5"
}
},
"from2": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
"integrity": "sha512-OMcX/4IC/uqEPVgGeyfN22LJk6AZrMkRZHxcHBMBvHScDGgwTm2GT2Wkgtocyd3JfZffjj2kYUDXXII0Fk9W0g==",
"dev": true,
"requires": {
"inherits": "^2.0.1",
"readable-stream": "^2.0.0"
}
},
"fs-extra": {
"version": "11.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.1.0.tgz",
"integrity": "sha512-0rcTq621PD5jM/e0a3EJoGC/1TC5ZBCERW82LQuwfGnCa1V8w7dpYH1yNu+SLb6E5dkeCBzKEyLGlFrnr+dUyw==",
"dev": true,
"requires": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
}
},
"fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"dev": true
},
"function-bind": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==",
"dev": true
},
"get-caller-file": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
"dev": true
},
"get-stream": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
"integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
"dev": true
},
"git-log-parser": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/git-log-parser/-/git-log-parser-1.2.0.tgz",
"integrity": "sha512-rnCVNfkTL8tdNryFuaY0fYiBWEBcgF748O6ZI61rslBvr2o7U65c2/6npCRqH40vuAhtgtDiqLTJjBVdrejCzA==",
"dev": true,
"requires": {
"argv-formatter": "~1.0.0",
"spawn-error-forwarder": "~1.0.0",
"split2": "~1.0.0",
"stream-combiner2": "~1.1.1",
"through2": "~2.0.0",
"traverse": "~0.6.6"
},
"dependencies": {
"split2": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/split2/-/split2-1.0.0.tgz",
"integrity": "sha512-NKywug4u4pX/AZBB1FCPzZ6/7O+Xhz1qMVbzTvvKvikjO99oPN87SkK08mEY9P63/5lWjK+wgOOgApnTg5r6qg==",
"dev": true,
"requires": {
"through2": "~2.0.0"
}
},
"through2": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
"integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
"dev": true,
"requires": {
"readable-stream": "~2.3.6",
"xtend": "~4.0.1"
}
}
}
},
"glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"dev": true,
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
}
},
"glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"requires": {
"is-glob": "^4.0.1"
}
},
"globby": {
"version": "11.1.0",
"resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
"integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
"dev": true,
"requires": {
"array-union": "^2.1.0",
"dir-glob": "^3.0.1",
"fast-glob": "^3.2.9",
"ignore": "^5.2.0",
"merge2": "^1.4.1",
"slash": "^3.0.0"
}
},
"graceful-fs": {
"version": "4.2.10",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz",
"integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==",
"dev": true
},
"handlebars": {
"version": "4.7.7",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz",
"integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==",
"dev": true,
"requires": {
"minimist": "^1.2.5",
"neo-async": "^2.6.0",
"source-map": "^0.6.1",
"uglify-js": "^3.1.4",
"wordwrap": "^1.0.0"
}
},
"hard-rejection": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz",
"integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==",
"dev": true
},
"has": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
"integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
"dev": true,
"requires": {
"function-bind": "^1.1.1"
}
},
"has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
"dev": true
},
"hook-std": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/hook-std/-/hook-std-3.0.0.tgz",
"integrity": "sha512-jHRQzjSDzMtFy34AGj1DN+vq54WVuhSvKgrHf0OMiFQTwDD4L/qqofVEWjLOBMTn5+lCD3fPg32W9yOfnEJTTw==",
"dev": true
},
"hosted-git-info": {
"version": "6.1.1",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-6.1.1.tgz",
"integrity": "sha512-r0EI+HBMcXadMrugk0GCQ+6BQV39PiWAZVfq7oIckeGiN7sjRGyQxPdft3nQekFTCQbYxLBH+/axZMeH8UX6+w==",
"dev": true,
"requires": {
"lru-cache": "^7.5.1"
}
},
"http-proxy-agent": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz",
"integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==",
"dev": true,
"requires": {
"@tootallnate/once": "2",
"agent-base": "6",
"debug": "4"
}
},
"https-proxy-agent": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
"integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
"dev": true,
"requires": {
"agent-base": "6",
"debug": "4"
}
},
"human-signals": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
"integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
"dev": true
},
"ignore": {
"version": "5.2.4",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
"integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==",
"dev": true
},
"import-fresh": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
"integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
"dev": true,
"requires": {
"parent-module": "^1.0.0",
"resolve-from": "^4.0.0"
},
"dependencies": {
"resolve-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"dev": true
}
}
},
"import-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/import-from/-/import-from-4.0.0.tgz",
"integrity": "sha512-P9J71vT5nLlDeV8FHs5nNxaLbrpfAV5cF5srvbZfpwpcJoM/xZR3hiv+q+SAnuSmuGbXMWud063iIMx/V/EWZQ==",
"dev": true
},
"indent-string": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
"integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
"dev": true
},
"inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"dev": true,
"requires": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true
},
"ini": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
"dev": true
},
"into-stream": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/into-stream/-/into-stream-6.0.0.tgz",
"integrity": "sha512-XHbaOAvP+uFKUFsOgoNPRjLkwB+I22JFPFe5OjTkQ0nwgj6+pSjb4NmB6VMxaPshLiOf+zcpOCBQuLwC1KHhZA==",
"dev": true,
"requires": {
"from2": "^2.3.0",
"p-is-promise": "^3.0.0"
}
},
"is-arrayish": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
"integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==",
"dev": true
},
"is-core-module": {
"version": "2.11.0",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz",
"integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==",
"dev": true,
"requires": {
"has": "^1.0.3"
}
},
"is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
"dev": true
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true
},
"is-glob": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
"dev": true,
"requires": {
"is-extglob": "^2.1.1"
}
},
"is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true
},
"is-obj": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz",
"integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==",
"dev": true
},
"is-path-cwd": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz",
"integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==",
"dev": true
},
"is-path-inside": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
"integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
"dev": true
},
"is-plain-obj": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz",
"integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==",
"dev": true
},
"is-plain-object": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz",
"integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==",
"dev": true
},
"is-stream": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
"integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
"dev": true
},
"is-text-path": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/is-text-path/-/is-text-path-1.0.1.tgz",
"integrity": "sha512-xFuJpne9oFz5qDaodwmmG08e3CawH/2ZV8Qqza1Ko7Sk8POWbkRdwIoAWVhqvq0XeUzANEhKo2n0IXUGBm7A/w==",
"dev": true,
"requires": {
"text-extensions": "^1.0.0"
}
},
"is-unicode-supported": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-1.3.0.tgz",
"integrity": "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==",
"dev": true
},
"isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==",
"dev": true
},
"isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
"dev": true
},
"issue-parser": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/issue-parser/-/issue-parser-6.0.0.tgz",
"integrity": "sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA==",
"dev": true,
"requires": {
"lodash.capitalize": "^4.2.1",
"lodash.escaperegexp": "^4.1.2",
"lodash.isplainobject": "^4.0.6",
"lodash.isstring": "^4.0.1",
"lodash.uniqby": "^4.7.0"
}
},
"java-properties": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/java-properties/-/java-properties-1.0.2.tgz",
"integrity": "sha512-qjdpeo2yKlYTH7nFdK0vbZWuTCesk4o63v5iVOlhMQPfuIZQfW/HI35SjfhA+4qpg36rnFSvUK5b1m+ckIblQQ==",
"dev": true
},
"js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
"dev": true
},
"js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dev": true,
"requires": {
"argparse": "^2.0.1"
}
},
"json-parse-better-errors": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz",
"integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==",
"dev": true
},
"json-parse-even-better-errors": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
"integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
"dev": true
},
"json-stringify-safe": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
"integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==",
"dev": true
},
"jsonfile": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
"integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
"dev": true,
"requires": {
"graceful-fs": "^4.1.6",
"universalify": "^2.0.0"
}
},
"jsonparse": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz",
"integrity": "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==",
"dev": true
},
"JSONStream": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz",
"integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==",
"dev": true,
"requires": {
"jsonparse": "^1.2.0",
"through": ">=2.2.7 <3"
}
},
"kind-of": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
"integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
"dev": true
},
"lines-and-columns": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
"integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
"dev": true
},
"load-json-file": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz",
"integrity": "sha512-Kx8hMakjX03tiGTLAIdJ+lL0htKnXjEZN6hk/tozf/WOuYGdZBJrZ+rCJRbVCugsjB3jMLn9746NsQIf5VjBMw==",
"dev": true,
"requires": {
"graceful-fs": "^4.1.2",
"parse-json": "^4.0.0",
"pify": "^3.0.0",
"strip-bom": "^3.0.0"
},
"dependencies": {
"parse-json": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz",
"integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==",
"dev": true,
"requires": {
"error-ex": "^1.3.1",
"json-parse-better-errors": "^1.0.1"
}
}
}
},
"locate-path": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz",
"integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==",
"dev": true,
"requires": {
"p-locate": "^6.0.0"
}
},
"lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"dev": true
},
"lodash-es": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
"integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==",
"dev": true
},
"lodash.capitalize": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/lodash.capitalize/-/lodash.capitalize-4.2.1.tgz",
"integrity": "sha512-kZzYOKspf8XVX5AvmQF94gQW0lejFVgb80G85bU4ZWzoJ6C03PQg3coYAUpSTpQWelrZELd3XWgHzw4Ck5kaIw==",
"dev": true
},
"lodash.escaperegexp": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz",
"integrity": "sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw==",
"dev": true
},
"lodash.ismatch": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/lodash.ismatch/-/lodash.ismatch-4.4.0.tgz",
"integrity": "sha512-fPMfXjGQEV9Xsq/8MTSgUf255gawYRbjwMyDbcvDhXgV7enSZA0hynz6vMPnpAb5iONEzBHBPsT+0zes5Z301g==",
"dev": true
},
"lodash.isplainobject": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
"integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==",
"dev": true
},
"lodash.isstring": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz",
"integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==",
"dev": true
},
"lodash.uniqby": {
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz",
"integrity": "sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww==",
"dev": true
},
"lru-cache": {
"version": "7.17.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.17.0.tgz",
"integrity": "sha512-zSxlVVwOabhVyTi6E8gYv2cr6bXK+8ifYz5/uyJb9feXX6NACVDwY4p5Ut3WC3Ivo/QhpARHU3iujx2xGAYHbQ==",
"dev": true
},
"map-obj": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz",
"integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==",
"dev": true
},
"marked": {
"version": "4.2.12",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.2.12.tgz",
"integrity": "sha512-yr8hSKa3Fv4D3jdZmtMMPghgVt6TWbk86WQaWhDloQjRSQhMMYCAro7jP7VDJrjjdV8pxVxMssXS8B8Y5DZ5aw==",
"dev": true
},
"marked-terminal": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-5.1.1.tgz",
"integrity": "sha512-+cKTOx9P4l7HwINYhzbrBSyzgxO2HaHKGZGuB1orZsMIgXYaJyfidT81VXRdpelW/PcHEWxywscePVgI/oUF6g==",
"dev": true,
"requires": {
"ansi-escapes": "^5.0.0",
"cardinal": "^2.1.1",
"chalk": "^5.0.0",
"cli-table3": "^0.6.1",
"node-emoji": "^1.11.0",
"supports-hyperlinks": "^2.2.0"
},
"dependencies": {
"chalk": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-5.2.0.tgz",
"integrity": "sha512-ree3Gqw/nazQAPuJJEy+avdl7QfZMcUvmHIKgEZkGL+xOBzRvup5Hxo6LHuMceSxOabuJLJm5Yp/92R9eMmMvA==",
"dev": true
}
}
},
"meow": {
"version": "8.1.2",
"resolved": "https://registry.npmjs.org/meow/-/meow-8.1.2.tgz",
"integrity": "sha512-r85E3NdZ+mpYk1C6RjPFEMSE+s1iZMuHtsHAqY0DT3jZczl0diWUZ8g6oU7h0M9cD2EL+PzaYghhCLzR0ZNn5Q==",
"dev": true,
"requires": {
"@types/minimist": "^1.2.0",
"camelcase-keys": "^6.2.2",
"decamelize-keys": "^1.1.0",
"hard-rejection": "^2.1.0",
"minimist-options": "4.1.0",
"normalize-package-data": "^3.0.0",
"read-pkg-up": "^7.0.1",
"redent": "^3.0.0",
"trim-newlines": "^3.0.0",
"type-fest": "^0.18.0",
"yargs-parser": "^20.2.3"
},
"dependencies": {
"find-up": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dev": true,
"requires": {
"locate-path": "^5.0.0",
"path-exists": "^4.0.0"
}
},
"locate-path": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dev": true,
"requires": {
"p-locate": "^4.1.0"
}
},
"p-limit": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dev": true,
"requires": {
"p-try": "^2.0.0"
}
},
"p-locate": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dev": true,
"requires": {
"p-limit": "^2.2.0"
}
},
"path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dev": true
},
"read-pkg-up": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
"integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
"dev": true,
"requires": {
"find-up": "^4.1.0",
"read-pkg": "^5.2.0",
"type-fest": "^0.8.1"
},
"dependencies": {
"type-fest": {
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
"integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
"dev": true
}
}
},
"type-fest": {
"version": "0.18.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz",
"integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==",
"dev": true
}
}
},
"merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
"integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
"dev": true
},
"merge2": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
"integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
"dev": true
},
"micromatch": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
"integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
"dev": true,
"requires": {
"braces": "^3.0.2",
"picomatch": "^2.3.1"
}
},
"mime": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-3.0.0.tgz",
"integrity": "sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==",
"dev": true
},
"mimic-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
"dev": true
},
"min-indent": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz",
"integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==",
"dev": true
},
"minimatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"dev": true,
"requires": {
"brace-expansion": "^1.1.7"
}
},
"minimist": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
"integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
"dev": true
},
"minimist-options": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz",
"integrity": "sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==",
"dev": true,
"requires": {
"arrify": "^1.0.1",
"is-plain-obj": "^1.1.0",
"kind-of": "^6.0.3"
}
},
"modify-values": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/modify-values/-/modify-values-1.0.1.tgz",
"integrity": "sha512-xV2bxeN6F7oYjZWTe/YPAy6MN2M+sL4u/Rlm2AHCIVGfo2p1yGmBHQ6vHehl4bRTZBdHu3TSkWdYgkwpYzAGSw==",
"dev": true
},
"ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
"dev": true
},
"neo-async": {
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
"integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==",
"dev": true
},
"nerf-dart": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/nerf-dart/-/nerf-dart-1.0.0.tgz",
"integrity": "sha512-EZSPZB70jiVsivaBLYDCyntd5eH8NTSMOn3rB+HxwdmKThGELLdYv8qVIMWvZEFy9w8ZZpW9h9OB32l1rGtj7g==",
"dev": true
},
"node-emoji": {
"version": "1.11.0",
"resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz",
"integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==",
"dev": true,
"requires": {
"lodash": "^4.17.21"
}
},
"node-fetch": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz",
"integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==",
"dev": true,
"requires": {
"whatwg-url": "^5.0.0"
}
},
"normalize-package-data": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.3.tgz",
"integrity": "sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==",
"dev": true,
"requires": {
"hosted-git-info": "^4.0.1",
"is-core-module": "^2.5.0",
"semver": "^7.3.4",
"validate-npm-package-license": "^3.0.1"
},
"dependencies": {
"hosted-git-info": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz",
"integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
},
"lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"requires": {
"yallist": "^4.0.0"
}
}
}
},
"normalize-url": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz",
"integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==",
"dev": true
},
"npm": {
"version": "8.19.4",
"resolved": "https://registry.npmjs.org/npm/-/npm-8.19.4.tgz",
"integrity": "sha512-3HANl8i9DKnUA89P4KEgVNN28EjSeDCmvEqbzOAuxCFDzdBZzjUl99zgnGpOUumvW5lvJo2HKcjrsc+tfyv1Hw==",
"dev": true,
"requires": {
"@isaacs/string-locale-compare": "^1.1.0",
"@npmcli/arborist": "^5.6.3",
"@npmcli/ci-detect": "^2.0.0",
"@npmcli/config": "^4.2.1",
"@npmcli/fs": "^2.1.0",
"@npmcli/map-workspaces": "^2.0.3",
"@npmcli/package-json": "^2.0.0",
"@npmcli/run-script": "^4.2.1",
"abbrev": "~1.1.1",
"archy": "~1.0.0",
"cacache": "^16.1.3",
"chalk": "^4.1.2",
"chownr": "^2.0.0",
"cli-columns": "^4.0.0",
"cli-table3": "^0.6.2",
"columnify": "^1.6.0",
"fastest-levenshtein": "^1.0.12",
"fs-minipass": "^2.1.0",
"glob": "^8.0.1",
"graceful-fs": "^4.2.10",
"hosted-git-info": "^5.2.1",
"ini": "^3.0.1",
"init-package-json": "^3.0.2",
"is-cidr": "^4.0.2",
"json-parse-even-better-errors": "^2.3.1",
"libnpmaccess": "^6.0.4",
"libnpmdiff": "^4.0.5",
"libnpmexec": "^4.0.14",
"libnpmfund": "^3.0.5",
"libnpmhook": "^8.0.4",
"libnpmorg": "^4.0.4",
"libnpmpack": "^4.1.3",
"libnpmpublish": "^6.0.5",
"libnpmsearch": "^5.0.4",
"libnpmteam": "^4.0.4",
"libnpmversion": "^3.0.7",
"make-fetch-happen": "^10.2.0",
"minimatch": "^5.1.0",
"minipass": "^3.1.6",
"minipass-pipeline": "^1.2.4",
"mkdirp": "^1.0.4",
"mkdirp-infer-owner": "^2.0.0",
"ms": "^2.1.2",
"node-gyp": "^9.1.0",
"nopt": "^6.0.0",
"npm-audit-report": "^3.0.0",
"npm-install-checks": "^5.0.0",
"npm-package-arg": "^9.1.0",
"npm-pick-manifest": "^7.0.2",
"npm-profile": "^6.2.0",
"npm-registry-fetch": "^13.3.1",
"npm-user-validate": "^1.0.1",
"npmlog": "^6.0.2",
"opener": "^1.5.2",
"p-map": "^4.0.0",
"pacote": "^13.6.2",
"parse-conflict-json": "^2.0.2",
"proc-log": "^2.0.1",
"qrcode-terminal": "^0.12.0",
"read": "~1.0.7",
"read-package-json": "^5.0.2",
"read-package-json-fast": "^2.0.3",
"readdir-scoped-modules": "^1.1.0",
"rimraf": "^3.0.2",
"semver": "^7.3.7",
"ssri": "^9.0.1",
"tar": "^6.1.11",
"text-table": "~0.2.0",
"tiny-relative-date": "^1.3.0",
"treeverse": "^2.0.0",
"validate-npm-package-name": "^4.0.0",
"which": "^2.0.2",
"write-file-atomic": "^4.0.1"
},
"dependencies": {
"@colors/colors": {
"version": "1.5.0",
"bundled": true,
"dev": true,
"optional": true
},
"@gar/promisify": {
"version": "1.1.3",
"bundled": true,
"dev": true
},
"@isaacs/string-locale-compare": {
"version": "1.1.0",
"bundled": true,
"dev": true
},
"@npmcli/arborist": {
"version": "5.6.3",
"bundled": true,
"dev": true,
"requires": {
"@isaacs/string-locale-compare": "^1.1.0",
"@npmcli/installed-package-contents": "^1.0.7",
"@npmcli/map-workspaces": "^2.0.3",
"@npmcli/metavuln-calculator": "^3.0.1",
"@npmcli/move-file": "^2.0.0",
"@npmcli/name-from-folder": "^1.0.1",
"@npmcli/node-gyp": "^2.0.0",
"@npmcli/package-json": "^2.0.0",
"@npmcli/query": "^1.2.0",
"@npmcli/run-script": "^4.1.3",
"bin-links": "^3.0.3",
"cacache": "^16.1.3",
"common-ancestor-path": "^1.0.1",
"hosted-git-info": "^5.2.1",
"json-parse-even-better-errors": "^2.3.1",
"json-stringify-nice": "^1.1.4",
"minimatch": "^5.1.0",
"mkdirp": "^1.0.4",
"mkdirp-infer-owner": "^2.0.0",
"nopt": "^6.0.0",
"npm-install-checks": "^5.0.0",
"npm-package-arg": "^9.0.0",
"npm-pick-manifest": "^7.0.2",
"npm-registry-fetch": "^13.0.0",
"npmlog": "^6.0.2",
"pacote": "^13.6.1",
"parse-conflict-json": "^2.0.1",
"proc-log": "^2.0.0",
"promise-all-reject-late": "^1.0.0",
"promise-call-limit": "^1.0.1",
"read-package-json-fast": "^2.0.2",
"readdir-scoped-modules": "^1.1.0",
"rimraf": "^3.0.2",
"semver": "^7.3.7",
"ssri": "^9.0.0",
"treeverse": "^2.0.0",
"walk-up-path": "^1.0.0"
}
},
"@npmcli/ci-detect": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"@npmcli/config": {
"version": "4.2.2",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/map-workspaces": "^2.0.2",
"ini": "^3.0.0",
"mkdirp-infer-owner": "^2.0.0",
"nopt": "^6.0.0",
"proc-log": "^2.0.0",
"read-package-json-fast": "^2.0.3",
"semver": "^7.3.5",
"walk-up-path": "^1.0.0"
}
},
"@npmcli/disparity-colors": {
"version": "2.0.0",
"bundled": true,
"dev": true,
"requires": {
"ansi-styles": "^4.3.0"
}
},
"@npmcli/fs": {
"version": "2.1.2",
"bundled": true,
"dev": true,
"requires": {
"@gar/promisify": "^1.1.3",
"semver": "^7.3.5"
}
},
"@npmcli/git": {
"version": "3.0.2",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/promise-spawn": "^3.0.0",
"lru-cache": "^7.4.4",
"mkdirp": "^1.0.4",
"npm-pick-manifest": "^7.0.0",
"proc-log": "^2.0.0",
"promise-inflight": "^1.0.1",
"promise-retry": "^2.0.1",
"semver": "^7.3.5",
"which": "^2.0.2"
}
},
"@npmcli/installed-package-contents": {
"version": "1.0.7",
"bundled": true,
"dev": true,
"requires": {
"npm-bundled": "^1.1.1",
"npm-normalize-package-bin": "^1.0.1"
},
"dependencies": {
"npm-bundled": {
"version": "1.1.2",
"bundled": true,
"dev": true,
"requires": {
"npm-normalize-package-bin": "^1.0.1"
}
}
}
},
"@npmcli/map-workspaces": {
"version": "2.0.4",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/name-from-folder": "^1.0.1",
"glob": "^8.0.1",
"minimatch": "^5.0.1",
"read-package-json-fast": "^2.0.3"
}
},
"@npmcli/metavuln-calculator": {
"version": "3.1.1",
"bundled": true,
"dev": true,
"requires": {
"cacache": "^16.0.0",
"json-parse-even-better-errors": "^2.3.1",
"pacote": "^13.0.3",
"semver": "^7.3.5"
}
},
"@npmcli/move-file": {
"version": "2.0.1",
"bundled": true,
"dev": true,
"requires": {
"mkdirp": "^1.0.4",
"rimraf": "^3.0.2"
}
},
"@npmcli/name-from-folder": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"@npmcli/node-gyp": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"@npmcli/package-json": {
"version": "2.0.0",
"bundled": true,
"dev": true,
"requires": {
"json-parse-even-better-errors": "^2.3.1"
}
},
"@npmcli/promise-spawn": {
"version": "3.0.0",
"bundled": true,
"dev": true,
"requires": {
"infer-owner": "^1.0.4"
}
},
"@npmcli/query": {
"version": "1.2.0",
"bundled": true,
"dev": true,
"requires": {
"npm-package-arg": "^9.1.0",
"postcss-selector-parser": "^6.0.10",
"semver": "^7.3.7"
}
},
"@npmcli/run-script": {
"version": "4.2.1",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/node-gyp": "^2.0.0",
"@npmcli/promise-spawn": "^3.0.0",
"node-gyp": "^9.0.0",
"read-package-json-fast": "^2.0.3",
"which": "^2.0.2"
}
},
"@tootallnate/once": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"abbrev": {
"version": "1.1.1",
"bundled": true,
"dev": true
},
"agent-base": {
"version": "6.0.2",
"bundled": true,
"dev": true,
"requires": {
"debug": "4"
}
},
"agentkeepalive": {
"version": "4.2.1",
"bundled": true,
"dev": true,
"requires": {
"debug": "^4.1.0",
"depd": "^1.1.2",
"humanize-ms": "^1.2.1"
}
},
"aggregate-error": {
"version": "3.1.0",
"bundled": true,
"dev": true,
"requires": {
"clean-stack": "^2.0.0",
"indent-string": "^4.0.0"
}
},
"ansi-regex": {
"version": "5.0.1",
"bundled": true,
"dev": true
},
"ansi-styles": {
"version": "4.3.0",
"bundled": true,
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"aproba": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"archy": {
"version": "1.0.0",
"bundled": true,
"dev": true
},
"are-we-there-yet": {
"version": "3.0.1",
"bundled": true,
"dev": true,
"requires": {
"delegates": "^1.0.0",
"readable-stream": "^3.6.0"
}
},
"asap": {
"version": "2.0.6",
"bundled": true,
"dev": true
},
"balanced-match": {
"version": "1.0.2",
"bundled": true,
"dev": true
},
"bin-links": {
"version": "3.0.3",
"bundled": true,
"dev": true,
"requires": {
"cmd-shim": "^5.0.0",
"mkdirp-infer-owner": "^2.0.0",
"npm-normalize-package-bin": "^2.0.0",
"read-cmd-shim": "^3.0.0",
"rimraf": "^3.0.0",
"write-file-atomic": "^4.0.0"
},
"dependencies": {
"npm-normalize-package-bin": {
"version": "2.0.0",
"bundled": true,
"dev": true
}
}
},
"binary-extensions": {
"version": "2.2.0",
"bundled": true,
"dev": true
},
"brace-expansion": {
"version": "2.0.1",
"bundled": true,
"dev": true,
"requires": {
"balanced-match": "^1.0.0"
}
},
"builtins": {
"version": "5.0.1",
"bundled": true,
"dev": true,
"requires": {
"semver": "^7.0.0"
}
},
"cacache": {
"version": "16.1.3",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/fs": "^2.1.0",
"@npmcli/move-file": "^2.0.0",
"chownr": "^2.0.0",
"fs-minipass": "^2.1.0",
"glob": "^8.0.1",
"infer-owner": "^1.0.4",
"lru-cache": "^7.7.1",
"minipass": "^3.1.6",
"minipass-collect": "^1.0.2",
"minipass-flush": "^1.0.5",
"minipass-pipeline": "^1.2.4",
"mkdirp": "^1.0.4",
"p-map": "^4.0.0",
"promise-inflight": "^1.0.1",
"rimraf": "^3.0.2",
"ssri": "^9.0.0",
"tar": "^6.1.11",
"unique-filename": "^2.0.0"
}
},
"chalk": {
"version": "4.1.2",
"bundled": true,
"dev": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"chownr": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"cidr-regex": {
"version": "3.1.1",
"bundled": true,
"dev": true,
"requires": {
"ip-regex": "^4.1.0"
}
},
"clean-stack": {
"version": "2.2.0",
"bundled": true,
"dev": true
},
"cli-columns": {
"version": "4.0.0",
"bundled": true,
"dev": true,
"requires": {
"string-width": "^4.2.3",
"strip-ansi": "^6.0.1"
}
},
"cli-table3": {
"version": "0.6.2",
"bundled": true,
"dev": true,
"requires": {
"@colors/colors": "1.5.0",
"string-width": "^4.2.0"
}
},
"clone": {
"version": "1.0.4",
"bundled": true,
"dev": true
},
"cmd-shim": {
"version": "5.0.0",
"bundled": true,
"dev": true,
"requires": {
"mkdirp-infer-owner": "^2.0.0"
}
},
"color-convert": {
"version": "2.0.1",
"bundled": true,
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"bundled": true,
"dev": true
},
"color-support": {
"version": "1.1.3",
"bundled": true,
"dev": true
},
"columnify": {
"version": "1.6.0",
"bundled": true,
"dev": true,
"requires": {
"strip-ansi": "^6.0.1",
"wcwidth": "^1.0.0"
}
},
"common-ancestor-path": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"concat-map": {
"version": "0.0.1",
"bundled": true,
"dev": true
},
"console-control-strings": {
"version": "1.1.0",
"bundled": true,
"dev": true
},
"cssesc": {
"version": "3.0.0",
"bundled": true,
"dev": true
},
"debug": {
"version": "4.3.4",
"bundled": true,
"dev": true,
"requires": {
"ms": "2.1.2"
},
"dependencies": {
"ms": {
"version": "2.1.2",
"bundled": true,
"dev": true
}
}
},
"debuglog": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"defaults": {
"version": "1.0.3",
"bundled": true,
"dev": true,
"requires": {
"clone": "^1.0.2"
}
},
"delegates": {
"version": "1.0.0",
"bundled": true,
"dev": true
},
"depd": {
"version": "1.1.2",
"bundled": true,
"dev": true
},
"dezalgo": {
"version": "1.0.4",
"bundled": true,
"dev": true,
"requires": {
"asap": "^2.0.0",
"wrappy": "1"
}
},
"diff": {
"version": "5.1.0",
"bundled": true,
"dev": true
},
"emoji-regex": {
"version": "8.0.0",
"bundled": true,
"dev": true
},
"encoding": {
"version": "0.1.13",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"iconv-lite": "^0.6.2"
}
},
"env-paths": {
"version": "2.2.1",
"bundled": true,
"dev": true
},
"err-code": {
"version": "2.0.3",
"bundled": true,
"dev": true
},
"fastest-levenshtein": {
"version": "1.0.12",
"bundled": true,
"dev": true
},
"fs-minipass": {
"version": "2.1.0",
"bundled": true,
"dev": true,
"requires": {
"minipass": "^3.0.0"
}
},
"fs.realpath": {
"version": "1.0.0",
"bundled": true,
"dev": true
},
"function-bind": {
"version": "1.1.1",
"bundled": true,
"dev": true
},
"gauge": {
"version": "4.0.4",
"bundled": true,
"dev": true,
"requires": {
"aproba": "^1.0.3 || ^2.0.0",
"color-support": "^1.1.3",
"console-control-strings": "^1.1.0",
"has-unicode": "^2.0.1",
"signal-exit": "^3.0.7",
"string-width": "^4.2.3",
"strip-ansi": "^6.0.1",
"wide-align": "^1.1.5"
}
},
"glob": {
"version": "8.0.3",
"bundled": true,
"dev": true,
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^5.0.1",
"once": "^1.3.0"
}
},
"graceful-fs": {
"version": "4.2.10",
"bundled": true,
"dev": true
},
"has": {
"version": "1.0.3",
"bundled": true,
"dev": true,
"requires": {
"function-bind": "^1.1.1"
}
},
"has-flag": {
"version": "4.0.0",
"bundled": true,
"dev": true
},
"has-unicode": {
"version": "2.0.1",
"bundled": true,
"dev": true
},
"hosted-git-info": {
"version": "5.2.1",
"bundled": true,
"dev": true,
"requires": {
"lru-cache": "^7.5.1"
}
},
"http-cache-semantics": {
"version": "4.1.1",
"bundled": true,
"dev": true
},
"http-proxy-agent": {
"version": "5.0.0",
"bundled": true,
"dev": true,
"requires": {
"@tootallnate/once": "2",
"agent-base": "6",
"debug": "4"
}
},
"https-proxy-agent": {
"version": "5.0.1",
"bundled": true,
"dev": true,
"requires": {
"agent-base": "6",
"debug": "4"
}
},
"humanize-ms": {
"version": "1.2.1",
"bundled": true,
"dev": true,
"requires": {
"ms": "^2.0.0"
}
},
"iconv-lite": {
"version": "0.6.3",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"safer-buffer": ">= 2.1.2 < 3.0.0"
}
},
"ignore-walk": {
"version": "5.0.1",
"bundled": true,
"dev": true,
"requires": {
"minimatch": "^5.0.1"
}
},
"imurmurhash": {
"version": "0.1.4",
"bundled": true,
"dev": true
},
"indent-string": {
"version": "4.0.0",
"bundled": true,
"dev": true
},
"infer-owner": {
"version": "1.0.4",
"bundled": true,
"dev": true
},
"inflight": {
"version": "1.0.6",
"bundled": true,
"dev": true,
"requires": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"inherits": {
"version": "2.0.4",
"bundled": true,
"dev": true
},
"ini": {
"version": "3.0.1",
"bundled": true,
"dev": true
},
"init-package-json": {
"version": "3.0.2",
"bundled": true,
"dev": true,
"requires": {
"npm-package-arg": "^9.0.1",
"promzard": "^0.3.0",
"read": "^1.0.7",
"read-package-json": "^5.0.0",
"semver": "^7.3.5",
"validate-npm-package-license": "^3.0.4",
"validate-npm-package-name": "^4.0.0"
}
},
"ip": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"ip-regex": {
"version": "4.3.0",
"bundled": true,
"dev": true
},
"is-cidr": {
"version": "4.0.2",
"bundled": true,
"dev": true,
"requires": {
"cidr-regex": "^3.1.1"
}
},
"is-core-module": {
"version": "2.10.0",
"bundled": true,
"dev": true,
"requires": {
"has": "^1.0.3"
}
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"bundled": true,
"dev": true
},
"is-lambda": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"isexe": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"json-parse-even-better-errors": {
"version": "2.3.1",
"bundled": true,
"dev": true
},
"json-stringify-nice": {
"version": "1.1.4",
"bundled": true,
"dev": true
},
"jsonparse": {
"version": "1.3.1",
"bundled": true,
"dev": true
},
"just-diff": {
"version": "5.1.1",
"bundled": true,
"dev": true
},
"just-diff-apply": {
"version": "5.4.1",
"bundled": true,
"dev": true
},
"libnpmaccess": {
"version": "6.0.4",
"bundled": true,
"dev": true,
"requires": {
"aproba": "^2.0.0",
"minipass": "^3.1.1",
"npm-package-arg": "^9.0.1",
"npm-registry-fetch": "^13.0.0"
}
},
"libnpmdiff": {
"version": "4.0.5",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/disparity-colors": "^2.0.0",
"@npmcli/installed-package-contents": "^1.0.7",
"binary-extensions": "^2.2.0",
"diff": "^5.1.0",
"minimatch": "^5.0.1",
"npm-package-arg": "^9.0.1",
"pacote": "^13.6.1",
"tar": "^6.1.0"
}
},
"libnpmexec": {
"version": "4.0.14",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/arborist": "^5.6.3",
"@npmcli/ci-detect": "^2.0.0",
"@npmcli/fs": "^2.1.1",
"@npmcli/run-script": "^4.2.0",
"chalk": "^4.1.0",
"mkdirp-infer-owner": "^2.0.0",
"npm-package-arg": "^9.0.1",
"npmlog": "^6.0.2",
"pacote": "^13.6.1",
"proc-log": "^2.0.0",
"read": "^1.0.7",
"read-package-json-fast": "^2.0.2",
"semver": "^7.3.7",
"walk-up-path": "^1.0.0"
}
},
"libnpmfund": {
"version": "3.0.5",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/arborist": "^5.6.3"
}
},
"libnpmhook": {
"version": "8.0.4",
"bundled": true,
"dev": true,
"requires": {
"aproba": "^2.0.0",
"npm-registry-fetch": "^13.0.0"
}
},
"libnpmorg": {
"version": "4.0.4",
"bundled": true,
"dev": true,
"requires": {
"aproba": "^2.0.0",
"npm-registry-fetch": "^13.0.0"
}
},
"libnpmpack": {
"version": "4.1.3",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/run-script": "^4.1.3",
"npm-package-arg": "^9.0.1",
"pacote": "^13.6.1"
}
},
"libnpmpublish": {
"version": "6.0.5",
"bundled": true,
"dev": true,
"requires": {
"normalize-package-data": "^4.0.0",
"npm-package-arg": "^9.0.1",
"npm-registry-fetch": "^13.0.0",
"semver": "^7.3.7",
"ssri": "^9.0.0"
}
},
"libnpmsearch": {
"version": "5.0.4",
"bundled": true,
"dev": true,
"requires": {
"npm-registry-fetch": "^13.0.0"
}
},
"libnpmteam": {
"version": "4.0.4",
"bundled": true,
"dev": true,
"requires": {
"aproba": "^2.0.0",
"npm-registry-fetch": "^13.0.0"
}
},
"libnpmversion": {
"version": "3.0.7",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/git": "^3.0.0",
"@npmcli/run-script": "^4.1.3",
"json-parse-even-better-errors": "^2.3.1",
"proc-log": "^2.0.0",
"semver": "^7.3.7"
}
},
"lru-cache": {
"version": "7.13.2",
"bundled": true,
"dev": true
},
"make-fetch-happen": {
"version": "10.2.1",
"bundled": true,
"dev": true,
"requires": {
"agentkeepalive": "^4.2.1",
"cacache": "^16.1.0",
"http-cache-semantics": "^4.1.0",
"http-proxy-agent": "^5.0.0",
"https-proxy-agent": "^5.0.0",
"is-lambda": "^1.0.1",
"lru-cache": "^7.7.1",
"minipass": "^3.1.6",
"minipass-collect": "^1.0.2",
"minipass-fetch": "^2.0.3",
"minipass-flush": "^1.0.5",
"minipass-pipeline": "^1.2.4",
"negotiator": "^0.6.3",
"promise-retry": "^2.0.1",
"socks-proxy-agent": "^7.0.0",
"ssri": "^9.0.0"
}
},
"minimatch": {
"version": "5.1.0",
"bundled": true,
"dev": true,
"requires": {
"brace-expansion": "^2.0.1"
}
},
"minipass": {
"version": "3.3.4",
"bundled": true,
"dev": true,
"requires": {
"yallist": "^4.0.0"
}
},
"minipass-collect": {
"version": "1.0.2",
"bundled": true,
"dev": true,
"requires": {
"minipass": "^3.0.0"
}
},
"minipass-fetch": {
"version": "2.1.1",
"bundled": true,
"dev": true,
"requires": {
"encoding": "^0.1.13",
"minipass": "^3.1.6",
"minipass-sized": "^1.0.3",
"minizlib": "^2.1.2"
}
},
"minipass-flush": {
"version": "1.0.5",
"bundled": true,
"dev": true,
"requires": {
"minipass": "^3.0.0"
}
},
"minipass-json-stream": {
"version": "1.0.1",
"bundled": true,
"dev": true,
"requires": {
"jsonparse": "^1.3.1",
"minipass": "^3.0.0"
}
},
"minipass-pipeline": {
"version": "1.2.4",
"bundled": true,
"dev": true,
"requires": {
"minipass": "^3.0.0"
}
},
"minipass-sized": {
"version": "1.0.3",
"bundled": true,
"dev": true,
"requires": {
"minipass": "^3.0.0"
}
},
"minizlib": {
"version": "2.1.2",
"bundled": true,
"dev": true,
"requires": {
"minipass": "^3.0.0",
"yallist": "^4.0.0"
}
},
"mkdirp": {
"version": "1.0.4",
"bundled": true,
"dev": true
},
"mkdirp-infer-owner": {
"version": "2.0.0",
"bundled": true,
"dev": true,
"requires": {
"chownr": "^2.0.0",
"infer-owner": "^1.0.4",
"mkdirp": "^1.0.3"
}
},
"ms": {
"version": "2.1.3",
"bundled": true,
"dev": true
},
"mute-stream": {
"version": "0.0.8",
"bundled": true,
"dev": true
},
"negotiator": {
"version": "0.6.3",
"bundled": true,
"dev": true
},
"node-gyp": {
"version": "9.1.0",
"bundled": true,
"dev": true,
"requires": {
"env-paths": "^2.2.0",
"glob": "^7.1.4",
"graceful-fs": "^4.2.6",
"make-fetch-happen": "^10.0.3",
"nopt": "^5.0.0",
"npmlog": "^6.0.0",
"rimraf": "^3.0.2",
"semver": "^7.3.5",
"tar": "^6.1.2",
"which": "^2.0.2"
},
"dependencies": {
"brace-expansion": {
"version": "1.1.11",
"bundled": true,
"dev": true,
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"glob": {
"version": "7.2.3",
"bundled": true,
"dev": true,
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
}
},
"minimatch": {
"version": "3.1.2",
"bundled": true,
"dev": true,
"requires": {
"brace-expansion": "^1.1.7"
}
},
"nopt": {
"version": "5.0.0",
"bundled": true,
"dev": true,
"requires": {
"abbrev": "1"
}
}
}
},
"nopt": {
"version": "6.0.0",
"bundled": true,
"dev": true,
"requires": {
"abbrev": "^1.0.0"
}
},
"normalize-package-data": {
"version": "4.0.1",
"bundled": true,
"dev": true,
"requires": {
"hosted-git-info": "^5.0.0",
"is-core-module": "^2.8.1",
"semver": "^7.3.5",
"validate-npm-package-license": "^3.0.4"
}
},
"npm-audit-report": {
"version": "3.0.0",
"bundled": true,
"dev": true,
"requires": {
"chalk": "^4.0.0"
}
},
"npm-bundled": {
"version": "2.0.1",
"bundled": true,
"dev": true,
"requires": {
"npm-normalize-package-bin": "^2.0.0"
},
"dependencies": {
"npm-normalize-package-bin": {
"version": "2.0.0",
"bundled": true,
"dev": true
}
}
},
"npm-install-checks": {
"version": "5.0.0",
"bundled": true,
"dev": true,
"requires": {
"semver": "^7.1.1"
}
},
"npm-normalize-package-bin": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"npm-package-arg": {
"version": "9.1.0",
"bundled": true,
"dev": true,
"requires": {
"hosted-git-info": "^5.0.0",
"proc-log": "^2.0.1",
"semver": "^7.3.5",
"validate-npm-package-name": "^4.0.0"
}
},
"npm-packlist": {
"version": "5.1.3",
"bundled": true,
"dev": true,
"requires": {
"glob": "^8.0.1",
"ignore-walk": "^5.0.1",
"npm-bundled": "^2.0.0",
"npm-normalize-package-bin": "^2.0.0"
},
"dependencies": {
"npm-normalize-package-bin": {
"version": "2.0.0",
"bundled": true,
"dev": true
}
}
},
"npm-pick-manifest": {
"version": "7.0.2",
"bundled": true,
"dev": true,
"requires": {
"npm-install-checks": "^5.0.0",
"npm-normalize-package-bin": "^2.0.0",
"npm-package-arg": "^9.0.0",
"semver": "^7.3.5"
},
"dependencies": {
"npm-normalize-package-bin": {
"version": "2.0.0",
"bundled": true,
"dev": true
}
}
},
"npm-profile": {
"version": "6.2.1",
"bundled": true,
"dev": true,
"requires": {
"npm-registry-fetch": "^13.0.1",
"proc-log": "^2.0.0"
}
},
"npm-registry-fetch": {
"version": "13.3.1",
"bundled": true,
"dev": true,
"requires": {
"make-fetch-happen": "^10.0.6",
"minipass": "^3.1.6",
"minipass-fetch": "^2.0.3",
"minipass-json-stream": "^1.0.1",
"minizlib": "^2.1.2",
"npm-package-arg": "^9.0.1",
"proc-log": "^2.0.0"
}
},
"npm-user-validate": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"npmlog": {
"version": "6.0.2",
"bundled": true,
"dev": true,
"requires": {
"are-we-there-yet": "^3.0.0",
"console-control-strings": "^1.1.0",
"gauge": "^4.0.3",
"set-blocking": "^2.0.0"
}
},
"once": {
"version": "1.4.0",
"bundled": true,
"dev": true,
"requires": {
"wrappy": "1"
}
},
"opener": {
"version": "1.5.2",
"bundled": true,
"dev": true
},
"p-map": {
"version": "4.0.0",
"bundled": true,
"dev": true,
"requires": {
"aggregate-error": "^3.0.0"
}
},
"pacote": {
"version": "13.6.2",
"bundled": true,
"dev": true,
"requires": {
"@npmcli/git": "^3.0.0",
"@npmcli/installed-package-contents": "^1.0.7",
"@npmcli/promise-spawn": "^3.0.0",
"@npmcli/run-script": "^4.1.0",
"cacache": "^16.0.0",
"chownr": "^2.0.0",
"fs-minipass": "^2.1.0",
"infer-owner": "^1.0.4",
"minipass": "^3.1.6",
"mkdirp": "^1.0.4",
"npm-package-arg": "^9.0.0",
"npm-packlist": "^5.1.0",
"npm-pick-manifest": "^7.0.0",
"npm-registry-fetch": "^13.0.1",
"proc-log": "^2.0.0",
"promise-retry": "^2.0.1",
"read-package-json": "^5.0.0",
"read-package-json-fast": "^2.0.3",
"rimraf": "^3.0.2",
"ssri": "^9.0.0",
"tar": "^6.1.11"
}
},
"parse-conflict-json": {
"version": "2.0.2",
"bundled": true,
"dev": true,
"requires": {
"json-parse-even-better-errors": "^2.3.1",
"just-diff": "^5.0.1",
"just-diff-apply": "^5.2.0"
}
},
"path-is-absolute": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"postcss-selector-parser": {
"version": "6.0.10",
"bundled": true,
"dev": true,
"requires": {
"cssesc": "^3.0.0",
"util-deprecate": "^1.0.2"
}
},
"proc-log": {
"version": "2.0.1",
"bundled": true,
"dev": true
},
"promise-all-reject-late": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"promise-call-limit": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"promise-inflight": {
"version": "1.0.1",
"bundled": true,
"dev": true
},
"promise-retry": {
"version": "2.0.1",
"bundled": true,
"dev": true,
"requires": {
"err-code": "^2.0.2",
"retry": "^0.12.0"
}
},
"promzard": {
"version": "0.3.0",
"bundled": true,
"dev": true,
"requires": {
"read": "1"
}
},
"qrcode-terminal": {
"version": "0.12.0",
"bundled": true,
"dev": true
},
"read": {
"version": "1.0.7",
"bundled": true,
"dev": true,
"requires": {
"mute-stream": "~0.0.4"
}
},
"read-cmd-shim": {
"version": "3.0.0",
"bundled": true,
"dev": true
},
"read-package-json": {
"version": "5.0.2",
"bundled": true,
"dev": true,
"requires": {
"glob": "^8.0.1",
"json-parse-even-better-errors": "^2.3.1",
"normalize-package-data": "^4.0.0",
"npm-normalize-package-bin": "^2.0.0"
},
"dependencies": {
"npm-normalize-package-bin": {
"version": "2.0.0",
"bundled": true,
"dev": true
}
}
},
"read-package-json-fast": {
"version": "2.0.3",
"bundled": true,
"dev": true,
"requires": {
"json-parse-even-better-errors": "^2.3.0",
"npm-normalize-package-bin": "^1.0.1"
}
},
"readable-stream": {
"version": "3.6.0",
"bundled": true,
"dev": true,
"requires": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
}
},
"readdir-scoped-modules": {
"version": "1.1.0",
"bundled": true,
"dev": true,
"requires": {
"debuglog": "^1.0.1",
"dezalgo": "^1.0.0",
"graceful-fs": "^4.1.2",
"once": "^1.3.0"
}
},
"retry": {
"version": "0.12.0",
"bundled": true,
"dev": true
},
"rimraf": {
"version": "3.0.2",
"bundled": true,
"dev": true,
"requires": {
"glob": "^7.1.3"
},
"dependencies": {
"brace-expansion": {
"version": "1.1.11",
"bundled": true,
"dev": true,
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"glob": {
"version": "7.2.3",
"bundled": true,
"dev": true,
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
}
},
"minimatch": {
"version": "3.1.2",
"bundled": true,
"dev": true,
"requires": {
"brace-expansion": "^1.1.7"
}
}
}
},
"safe-buffer": {
"version": "5.2.1",
"bundled": true,
"dev": true
},
"safer-buffer": {
"version": "2.1.2",
"bundled": true,
"dev": true,
"optional": true
},
"semver": {
"version": "7.3.7",
"bundled": true,
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
},
"dependencies": {
"lru-cache": {
"version": "6.0.0",
"bundled": true,
"dev": true,
"requires": {
"yallist": "^4.0.0"
}
}
}
},
"set-blocking": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"signal-exit": {
"version": "3.0.7",
"bundled": true,
"dev": true
},
"smart-buffer": {
"version": "4.2.0",
"bundled": true,
"dev": true
},
"socks": {
"version": "2.7.0",
"bundled": true,
"dev": true,
"requires": {
"ip": "^2.0.0",
"smart-buffer": "^4.2.0"
}
},
"socks-proxy-agent": {
"version": "7.0.0",
"bundled": true,
"dev": true,
"requires": {
"agent-base": "^6.0.2",
"debug": "^4.3.3",
"socks": "^2.6.2"
}
},
"spdx-correct": {
"version": "3.1.1",
"bundled": true,
"dev": true,
"requires": {
"spdx-expression-parse": "^3.0.0",
"spdx-license-ids": "^3.0.0"
}
},
"spdx-exceptions": {
"version": "2.3.0",
"bundled": true,
"dev": true
},
"spdx-expression-parse": {
"version": "3.0.1",
"bundled": true,
"dev": true,
"requires": {
"spdx-exceptions": "^2.1.0",
"spdx-license-ids": "^3.0.0"
}
},
"spdx-license-ids": {
"version": "3.0.11",
"bundled": true,
"dev": true
},
"ssri": {
"version": "9.0.1",
"bundled": true,
"dev": true,
"requires": {
"minipass": "^3.1.1"
}
},
"string_decoder": {
"version": "1.3.0",
"bundled": true,
"dev": true,
"requires": {
"safe-buffer": "~5.2.0"
}
},
"string-width": {
"version": "4.2.3",
"bundled": true,
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
}
},
"strip-ansi": {
"version": "6.0.1",
"bundled": true,
"dev": true,
"requires": {
"ansi-regex": "^5.0.1"
}
},
"supports-color": {
"version": "7.2.0",
"bundled": true,
"dev": true,
"requires": {
"has-flag": "^4.0.0"
}
},
"tar": {
"version": "6.1.11",
"bundled": true,
"dev": true,
"requires": {
"chownr": "^2.0.0",
"fs-minipass": "^2.0.0",
"minipass": "^3.0.0",
"minizlib": "^2.1.1",
"mkdirp": "^1.0.3",
"yallist": "^4.0.0"
}
},
"text-table": {
"version": "0.2.0",
"bundled": true,
"dev": true
},
"tiny-relative-date": {
"version": "1.3.0",
"bundled": true,
"dev": true
},
"treeverse": {
"version": "2.0.0",
"bundled": true,
"dev": true
},
"unique-filename": {
"version": "2.0.1",
"bundled": true,
"dev": true,
"requires": {
"unique-slug": "^3.0.0"
}
},
"unique-slug": {
"version": "3.0.0",
"bundled": true,
"dev": true,
"requires": {
"imurmurhash": "^0.1.4"
}
},
"util-deprecate": {
"version": "1.0.2",
"bundled": true,
"dev": true
},
"validate-npm-package-license": {
"version": "3.0.4",
"bundled": true,
"dev": true,
"requires": {
"spdx-correct": "^3.0.0",
"spdx-expression-parse": "^3.0.0"
}
},
"validate-npm-package-name": {
"version": "4.0.0",
"bundled": true,
"dev": true,
"requires": {
"builtins": "^5.0.0"
}
},
"walk-up-path": {
"version": "1.0.0",
"bundled": true,
"dev": true
},
"wcwidth": {
"version": "1.0.1",
"bundled": true,
"dev": true,
"requires": {
"defaults": "^1.0.3"
}
},
"which": {
"version": "2.0.2",
"bundled": true,
"dev": true,
"requires": {
"isexe": "^2.0.0"
}
},
"wide-align": {
"version": "1.1.5",
"bundled": true,
"dev": true,
"requires": {
"string-width": "^1.0.2 || 2 || 3 || 4"
}
},
"wrappy": {
"version": "1.0.2",
"bundled": true,
"dev": true
},
"write-file-atomic": {
"version": "4.0.2",
"bundled": true,
"dev": true,
"requires": {
"imurmurhash": "^0.1.4",
"signal-exit": "^3.0.7"
}
},
"yallist": {
"version": "4.0.0",
"bundled": true,
"dev": true
}
}
},
"npm-run-path": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
"integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
"dev": true,
"requires": {
"path-key": "^3.0.0"
}
},
"once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dev": true,
"requires": {
"wrappy": "1"
}
},
"onetime": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
"integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
"dev": true,
"requires": {
"mimic-fn": "^2.1.0"
}
},
"p-each-series": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/p-each-series/-/p-each-series-3.0.0.tgz",
"integrity": "sha512-lastgtAdoH9YaLyDa5i5z64q+kzOcQHsQ5SsZJD3q0VEyI8mq872S3geuNbRUQLVAE9siMfgKrpj7MloKFHruw==",
"dev": true
},
"p-filter": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/p-filter/-/p-filter-2.1.0.tgz",
"integrity": "sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==",
"dev": true,
"requires": {
"p-map": "^2.0.0"
}
},
"p-is-promise": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-3.0.0.tgz",
"integrity": "sha512-Wo8VsW4IRQSKVXsJCn7TomUaVtyfjVDn3nUP7kE967BQk0CwFpdbZs0X0uk5sW9mkBa9eNM7hCMaG93WUAwxYQ==",
"dev": true
},
"p-limit": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz",
"integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==",
"dev": true,
"requires": {
"yocto-queue": "^1.0.0"
}
},
"p-locate": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz",
"integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==",
"dev": true,
"requires": {
"p-limit": "^4.0.0"
}
},
"p-map": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz",
"integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==",
"dev": true
},
"p-reduce": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-2.1.0.tgz",
"integrity": "sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw==",
"dev": true
},
"p-retry": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
"integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
"dev": true,
"requires": {
"@types/retry": "0.12.0",
"retry": "^0.13.1"
}
},
"p-try": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"dev": true
},
"parent-module": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
"integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
"dev": true,
"requires": {
"callsites": "^3.0.0"
}
},
"parse-json": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
"integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
"dev": true,
"requires": {
"@babel/code-frame": "^7.0.0",
"error-ex": "^1.3.1",
"json-parse-even-better-errors": "^2.3.0",
"lines-and-columns": "^1.1.6"
}
},
"path-exists": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz",
"integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==",
"dev": true
},
"path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"dev": true
},
"path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"dev": true
},
"path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
"dev": true
},
"path-type": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
"integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
"dev": true
},
"picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"dev": true
},
"pify": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
"integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==",
"dev": true
},
"pkg-conf": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-2.1.0.tgz",
"integrity": "sha512-C+VUP+8jis7EsQZIhDYmS5qlNtjv2yP4SNtjXK9AP1ZcTRlnSfuumaTnRfYZnYgUUYVIKqL0fRvmUGDV2fmp6g==",
"dev": true,
"requires": {
"find-up": "^2.0.0",
"load-json-file": "^4.0.0"
},
"dependencies": {
"find-up": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz",
"integrity": "sha512-NWzkk0jSJtTt08+FBFMvXoeZnOJD+jTtsRmBYbAIzJdX6l7dLgR7CTubCM5/eDdPUBvLCeVasP1brfVR/9/EZQ==",
"dev": true,
"requires": {
"locate-path": "^2.0.0"
}
},
"locate-path": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz",
"integrity": "sha512-NCI2kiDkyR7VeEKm27Kda/iQHyKJe1Bu0FlTbYp3CqJu+9IFe9bLyAjMxf5ZDDbEg+iMPzB5zYyUTSm8wVTKmA==",
"dev": true,
"requires": {
"p-locate": "^2.0.0",
"path-exists": "^3.0.0"
}
},
"p-limit": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz",
"integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==",
"dev": true,
"requires": {
"p-try": "^1.0.0"
}
},
"p-locate": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz",
"integrity": "sha512-nQja7m7gSKuewoVRen45CtVfODR3crN3goVQ0DDZ9N3yHxgpkuBhZqsaiotSQRrADUrne346peY7kT3TSACykg==",
"dev": true,
"requires": {
"p-limit": "^1.1.0"
}
},
"p-try": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz",
"integrity": "sha512-U1etNYuMJoIz3ZXSrrySFjsXQTWOx2/jdi86L+2pRvph/qMKL6sbcCYdH23fqsbm8TH2Gn0OybpT4eSFlCVHww==",
"dev": true
},
"path-exists": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
"integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==",
"dev": true
}
}
},
"process-nextick-args": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
"dev": true
},
"proto-list": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz",
"integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==",
"dev": true
},
"q": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz",
"integrity": "sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw==",
"dev": true
},
"queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
"integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
"dev": true
},
"quick-lru": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz",
"integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==",
"dev": true
},
"rc": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
"integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
"dev": true,
"requires": {
"deep-extend": "^0.6.0",
"ini": "~1.3.0",
"minimist": "^1.2.0",
"strip-json-comments": "~2.0.1"
}
},
"read-pkg": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz",
"integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==",
"dev": true,
"requires": {
"@types/normalize-package-data": "^2.4.0",
"normalize-package-data": "^2.5.0",
"parse-json": "^5.0.0",
"type-fest": "^0.6.0"
},
"dependencies": {
"hosted-git-info": {
"version": "2.8.9",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz",
"integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==",
"dev": true
},
"normalize-package-data": {
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz",
"integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==",
"dev": true,
"requires": {
"hosted-git-info": "^2.1.4",
"resolve": "^1.10.0",
"semver": "2 || 3 || 4 || 5",
"validate-npm-package-license": "^3.0.1"
}
},
"semver": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
"integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
"dev": true
},
"type-fest": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz",
"integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==",
"dev": true
}
}
},
"read-pkg-up": {
"version": "9.1.0",
"resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-9.1.0.tgz",
"integrity": "sha512-vaMRR1AC1nrd5CQM0PhlRsO5oc2AAigqr7cCrZ/MW/Rsaflz4RlgzkpL4qoU/z1F6wrbd85iFv1OQj/y5RdGvg==",
"dev": true,
"requires": {
"find-up": "^6.3.0",
"read-pkg": "^7.1.0",
"type-fest": "^2.5.0"
},
"dependencies": {
"read-pkg": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-7.1.0.tgz",
"integrity": "sha512-5iOehe+WF75IccPc30bWTbpdDQLOCc3Uu8bi3Dte3Eueij81yx1Mrufk8qBx/YAbR4uL1FdUr+7BKXDwEtisXg==",
"dev": true,
"requires": {
"@types/normalize-package-data": "^2.4.1",
"normalize-package-data": "^3.0.2",
"parse-json": "^5.2.0",
"type-fest": "^2.0.0"
}
},
"type-fest": {
"version": "2.19.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz",
"integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==",
"dev": true
}
}
},
"readable-stream": {
"version": "2.3.7",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
"integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
"dev": true,
"requires": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
},
"redent": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz",
"integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==",
"dev": true,
"requires": {
"indent-string": "^4.0.0",
"strip-indent": "^3.0.0"
}
},
"redeyed": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/redeyed/-/redeyed-2.1.1.tgz",
"integrity": "sha512-FNpGGo1DycYAdnrKFxCMmKYgo/mILAqtRYbkdQD8Ep/Hk2PQ5+aEAEx+IU713RTDmuBaH0c8P5ZozurNu5ObRQ==",
"dev": true,
"requires": {
"esprima": "~4.0.0"
}
},
"registry-auth-token": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.1.tgz",
"integrity": "sha512-UfxVOj8seK1yaIOiieV4FIP01vfBDLsY0H9sQzi9EbbUdJiuuBjJgLa1DpImXMNPnVkBD4eVxTEXcrZA6kfpJA==",
"dev": true,
"requires": {
"@pnpm/npm-conf": "^1.0.4"
}
},
"require-directory": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
"integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
"dev": true
},
"resolve": {
"version": "1.22.1",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz",
"integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==",
"dev": true,
"requires": {
"is-core-module": "^2.9.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
}
},
"resolve-from": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
"integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
"dev": true
},
"retry": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
"integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
"dev": true
},
"reusify": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
"integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
"dev": true
},
"rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dev": true,
"requires": {
"glob": "^7.1.3"
}
},
"run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
"integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
"dev": true,
"requires": {
"queue-microtask": "^1.2.2"
}
},
"safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
"dev": true
},
"semantic-release": {
"version": "20.1.0",
"resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-20.1.0.tgz",
"integrity": "sha512-+9+n6RIr0Fz0F53cXrjpawxWlUg3O7/qr1jF9lrE+/v6WqwBrSWnavVHTPaf2WLerET2EngoqI0M4pahkKl6XQ==",
"dev": true,
"requires": {
"@semantic-release/commit-analyzer": "^9.0.2",
"@semantic-release/error": "^3.0.0",
"@semantic-release/github": "^8.0.0",
"@semantic-release/npm": "^9.0.0",
"@semantic-release/release-notes-generator": "^10.0.0",
"aggregate-error": "^4.0.1",
"cosmiconfig": "^8.0.0",
"debug": "^4.0.0",
"env-ci": "^8.0.0",
"execa": "^6.1.0",
"figures": "^5.0.0",
"find-versions": "^5.1.0",
"get-stream": "^6.0.0",
"git-log-parser": "^1.2.0",
"hook-std": "^3.0.0",
"hosted-git-info": "^6.0.0",
"lodash-es": "^4.17.21",
"marked": "^4.1.0",
"marked-terminal": "^5.1.1",
"micromatch": "^4.0.2",
"p-each-series": "^3.0.0",
"p-reduce": "^3.0.0",
"read-pkg-up": "^9.1.0",
"resolve-from": "^5.0.0",
"semver": "^7.3.2",
"semver-diff": "^4.0.0",
"signale": "^1.2.1",
"yargs": "^17.5.1"
},
"dependencies": {
"aggregate-error": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-4.0.1.tgz",
"integrity": "sha512-0poP0T7el6Vq3rstR8Mn4V/IQrpBLO6POkUSrN7RhyY+GF/InCFShQzsQ39T25gkHhLgSLByyAz+Kjb+c2L98w==",
"dev": true,
"requires": {
"clean-stack": "^4.0.0",
"indent-string": "^5.0.0"
}
},
"clean-stack": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-4.2.0.tgz",
"integrity": "sha512-LYv6XPxoyODi36Dp976riBtSY27VmFo+MKqEU9QCCWyTrdEPDog+RWA7xQWHi6Vbp61j5c4cdzzX1NidnwtUWg==",
"dev": true,
"requires": {
"escape-string-regexp": "5.0.0"
}
},
"escape-string-regexp": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
"integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==",
"dev": true
},
"execa": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/execa/-/execa-6.1.0.tgz",
"integrity": "sha512-QVWlX2e50heYJcCPG0iWtf8r0xjEYfz/OYLGDYH+IyjWezzPNxz63qNFOu0l4YftGWuizFVZHHs8PrLU5p2IDA==",
"dev": true,
"requires": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.1",
"human-signals": "^3.0.1",
"is-stream": "^3.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^5.1.0",
"onetime": "^6.0.0",
"signal-exit": "^3.0.7",
"strip-final-newline": "^3.0.0"
}
},
"human-signals": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-3.0.1.tgz",
"integrity": "sha512-rQLskxnM/5OCldHo+wNXbpVgDn5A17CUoKX+7Sokwaknlq7CdSnphy0W39GU8dw59XiCXmFXDg4fRuckQRKewQ==",
"dev": true
},
"indent-string": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz",
"integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==",
"dev": true
},
"is-stream": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
"integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
"dev": true
},
"mimic-fn": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
"integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
"dev": true
},
"npm-run-path": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz",
"integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==",
"dev": true,
"requires": {
"path-key": "^4.0.0"
}
},
"onetime": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
"integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
"dev": true,
"requires": {
"mimic-fn": "^4.0.0"
}
},
"p-reduce": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-3.0.0.tgz",
"integrity": "sha512-xsrIUgI0Kn6iyDYm9StOpOeK29XM1aboGji26+QEortiFST1hGZaUQOLhtEbqHErPpGW/aSz6allwK2qcptp0Q==",
"dev": true
},
"path-key": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
"integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
"dev": true
},
"strip-final-newline": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
"integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
"dev": true
}
}
},
"semver": {
"version": "7.3.8",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz",
"integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
},
"dependencies": {
"lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"requires": {
"yallist": "^4.0.0"
}
}
}
},
"semver-diff": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz",
"integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==",
"dev": true,
"requires": {
"semver": "^7.3.5"
}
},
"semver-regex": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-4.0.5.tgz",
"integrity": "sha512-hunMQrEy1T6Jr2uEVjrAIqjwWcQTgOAcIM52C8MY1EZSD3DDNft04XzvYKPqjED65bNVVko0YI38nYeEHCX3yw==",
"dev": true
},
"shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dev": true,
"requires": {
"shebang-regex": "^3.0.0"
}
},
"shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"dev": true
},
"signal-exit": {
"version": "3.0.7",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
"dev": true
},
"signale": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/signale/-/signale-1.4.0.tgz",
"integrity": "sha512-iuh+gPf28RkltuJC7W5MRi6XAjTDCAPC/prJUpQoG4vIP3MJZ+GTydVnodXA7pwvTKb2cA0m9OFZW/cdWy/I/w==",
"dev": true,
"requires": {
"chalk": "^2.3.2",
"figures": "^2.0.0",
"pkg-conf": "^2.1.0"
},
"dependencies": {
"figures": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz",
"integrity": "sha512-Oa2M9atig69ZkfwiApY8F2Yy+tzMbazyvqv21R0NsSC8floSOC09BbT1ITWAdoMGQvJ/aZnR1KMwdx9tvHnTNA==",
"dev": true,
"requires": {
"escape-string-regexp": "^1.0.5"
}
}
}
},
"slash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
"integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
"dev": true
},
"source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
"dev": true
},
"spawn-error-forwarder": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/spawn-error-forwarder/-/spawn-error-forwarder-1.0.0.tgz",
"integrity": "sha512-gRjMgK5uFjbCvdibeGJuy3I5OYz6VLoVdsOJdA6wV0WlfQVLFueoqMxwwYD9RODdgb6oUIvlRlsyFSiQkMKu0g==",
"dev": true
},
"spdx-correct": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz",
"integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==",
"dev": true,
"requires": {
"spdx-expression-parse": "^3.0.0",
"spdx-license-ids": "^3.0.0"
}
},
"spdx-exceptions": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz",
"integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==",
"dev": true
},
"spdx-expression-parse": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz",
"integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==",
"dev": true,
"requires": {
"spdx-exceptions": "^2.1.0",
"spdx-license-ids": "^3.0.0"
}
},
"spdx-license-ids": {
"version": "3.0.12",
"resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.12.tgz",
"integrity": "sha512-rr+VVSXtRhO4OHbXUiAF7xW3Bo9DuuF6C5jH+q/x15j2jniycgKbxU09Hr0WqlSLUs4i4ltHGXqTe7VHclYWyA==",
"dev": true
},
"split": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz",
"integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==",
"dev": true,
"requires": {
"through": "2"
}
},
"split2": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/split2/-/split2-3.2.2.tgz",
"integrity": "sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==",
"dev": true,
"requires": {
"readable-stream": "^3.0.0"
},
"dependencies": {
"readable-stream": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
"integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
"dev": true,
"requires": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
}
}
}
},
"stream-combiner2": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/stream-combiner2/-/stream-combiner2-1.1.1.tgz",
"integrity": "sha512-3PnJbYgS56AeWgtKF5jtJRT6uFJe56Z0Hc5Ngg/6sI6rIt8iiMBTa9cvdyFfpMQjaVHr8dusbNeFGIIonxOvKw==",
"dev": true,
"requires": {
"duplexer2": "~0.1.0",
"readable-stream": "^2.0.2"
}
},
"string_decoder": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"dev": true,
"requires": {
"safe-buffer": "~5.1.0"
}
},
"string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
}
},
"strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.1"
}
},
"strip-bom": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
"integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==",
"dev": true
},
"strip-final-newline": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
"integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
"dev": true
},
"strip-indent": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz",
"integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==",
"dev": true,
"requires": {
"min-indent": "^1.0.0"
}
},
"strip-json-comments": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
"integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==",
"dev": true
},
"supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dev": true,
"requires": {
"has-flag": "^3.0.0"
}
},
"supports-hyperlinks": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz",
"integrity": "sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==",
"dev": true,
"requires": {
"has-flag": "^4.0.0",
"supports-color": "^7.0.0"
},
"dependencies": {
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"requires": {
"has-flag": "^4.0.0"
}
}
}
},
"supports-preserve-symlinks-flag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
"integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
"dev": true
},
"temp-dir": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-2.0.0.tgz",
"integrity": "sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg==",
"dev": true
},
"tempy": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/tempy/-/tempy-1.0.1.tgz",
"integrity": "sha512-biM9brNqxSc04Ee71hzFbryD11nX7VPhQQY32AdDmjFvodsRFz/3ufeoTZ6uYkRFfGo188tENcASNs3vTdsM0w==",
"dev": true,
"requires": {
"del": "^6.0.0",
"is-stream": "^2.0.0",
"temp-dir": "^2.0.0",
"type-fest": "^0.16.0",
"unique-string": "^2.0.0"
},
"dependencies": {
"type-fest": {
"version": "0.16.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.16.0.tgz",
"integrity": "sha512-eaBzG6MxNzEn9kiwvtre90cXaNLkmadMWa1zQMs3XORCXNbsH/OewwbxC5ia9dCxIxnTAsSxXJaa/p5y8DlvJg==",
"dev": true
}
}
},
"text-extensions": {
"version": "1.9.0",
"resolved": "https://registry.npmjs.org/text-extensions/-/text-extensions-1.9.0.tgz",
"integrity": "sha512-wiBrwC1EhBelW12Zy26JeOUkQ5mRu+5o8rpsJk5+2t+Y5vE7e842qtZDQ2g1NpX/29HdyFeJ4nSIhI47ENSxlQ==",
"dev": true
},
"through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
"integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==",
"dev": true
},
"through2": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/through2/-/through2-4.0.2.tgz",
"integrity": "sha512-iOqSav00cVxEEICeD7TjLB1sueEL+81Wpzp2bY17uZjZN0pWZPuo4suZ/61VujxmqSGFfgOcNuTZ85QJwNZQpw==",
"dev": true,
"requires": {
"readable-stream": "3"
},
"dependencies": {
"readable-stream": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
"integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
"dev": true,
"requires": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
}
}
}
},
"to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"requires": {
"is-number": "^7.0.0"
}
},
"tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"dev": true
},
"traverse": {
"version": "0.6.7",
"resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.7.tgz",
"integrity": "sha512-/y956gpUo9ZNCb99YjxG7OaslxZWHfCHAUUfshwqOXmxUIvqLjVO581BT+gM59+QV9tFe6/CGG53tsA1Y7RSdg==",
"dev": true
},
"trim-newlines": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz",
"integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==",
"dev": true
},
"type-fest": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz",
"integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==",
"dev": true
},
"uglify-js": {
"version": "3.17.4",
"resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.17.4.tgz",
"integrity": "sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==",
"dev": true,
"optional": true
},
"unique-string": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz",
"integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==",
"dev": true,
"requires": {
"crypto-random-string": "^2.0.0"
}
},
"universal-user-agent": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz",
"integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==",
"dev": true
},
"universalify": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
"integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==",
"dev": true
},
"url-join": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
"integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==",
"dev": true
},
"util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
"dev": true
},
"validate-npm-package-license": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz",
"integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==",
"dev": true,
"requires": {
"spdx-correct": "^3.0.0",
"spdx-expression-parse": "^3.0.0"
}
},
"webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"dev": true
},
"whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dev": true,
"requires": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dev": true,
"requires": {
"isexe": "^2.0.0"
}
},
"wordwrap": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
"integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==",
"dev": true
},
"wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"requires": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
}
}
},
"wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"dev": true
},
"xtend": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
"dev": true
},
"y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
"integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
"dev": true
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"dev": true
},
"yargs": {
"version": "17.7.1",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz",
"integrity": "sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==",
"dev": true,
"requires": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"dependencies": {
"yargs-parser": {
"version": "21.1.1",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
"integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
"dev": true
}
}
},
"yargs-parser": {
"version": "20.2.9",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
"integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
"dev": true
},
"yocto-queue": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz",
"integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==",
"dev": true
}
}
}
| 0 |
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/CONTRIBUTING.md | # Contributing
## Releases
Releases for `dependency-file-generator` are handled by [semantic-release][semantic-release]. To ensure that every commit on the `main` branch has a semantic commit message, the following settings have been configured:
- Only squash commits are allowed
- The default squash commit message is derived from the pull-request's title and body
- Pull request titles are required to be semantic commit messages
The table below (from [semantic-release][semantic-release] docs) shows the types of changes that correspond to each release type.
| Commit message | Release type |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- |
| `fix(pencil): stop graphite breaking when too much pressure applied` | Patch Release |
| `feat(pencil): add 'graphiteWidth' option` | Minor Release |
| `perf(pencil): remove graphiteWidth option`<br><br>`BREAKING CHANGE: The graphiteWidth option has been removed.`<br>`The default graphite width of 10mm is always used for performance reasons.` | Major <br /> (Note that the `BREAKING CHANGE: ` string must be in the body of the pull-request) |
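
Rendered as a full squash commit message, the major-release example from the table looks like this (note that the `BREAKING CHANGE: ` string appears in the body, not the title):

```
perf(pencil): remove graphiteWidth option

BREAKING CHANGE: The graphiteWidth option has been removed.
The default graphite width of 10mm is always used for performance reasons.
```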
If a change type not listed in the table above is used, it will not trigger a release. For example:
- `docs: fix README typo`
- `ci: update GHAs workflow`
- `chore: some miscellaneous work`
The source of truth for these rules is [semantic-release/commit-analyzer](https://github.com/semantic-release/commit-analyzer). The `angular` preset is used by default, which is documented [here](https://github.com/conventional-changelog/conventional-changelog/tree/master/packages/conventional-changelog-angular).
[semantic-release]: https://github.com/semantic-release/semantic-release
| 0 |
rapidsai_public_repos | rapidsai_public_repos/dependency-file-generator/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/dependency-file-generator | rapidsai_public_repos/dependency-file-generator/tests/test_examples.py | import glob
import os
import pathlib
import shutil
import jsonschema
import pytest
import yaml
from jsonschema.exceptions import ValidationError
from rapids_dependency_file_generator.cli import main
CURRENT_DIR = pathlib.Path(__file__).parent
# Erroneous examples raise runtime errors from the generator.
_erroneous_examples = ("no-specific-match", "pyproject_matrix")
ERRONEOUS_EXAMPLE_FILES = [CURRENT_DIR / "examples" / ex for ex in _erroneous_examples]
EXAMPLE_FILES = [
pth
for pth in CURRENT_DIR.glob("examples/*/dependencies.yaml")
if all(ex not in str(pth.absolute()) for ex in _erroneous_examples)
]
# Invalid examples raise validation errors upon schema validation.
INVALID_EXAMPLE_FILES = list(CURRENT_DIR.glob("examples/invalid/*/dependencies.yaml"))
def make_file_set(file_dir):
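    """Return all files under ``file_dir`` as a set of paths relative to it."""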
return {
pathlib.Path(f).relative_to(file_dir)
for f in glob.glob(str(file_dir) + "/**", recursive=True)
if pathlib.Path(f).is_file()
}
@pytest.fixture(
params=[example_file.parent for example_file in EXAMPLE_FILES],
ids=[example_file.parent.stem for example_file in EXAMPLE_FILES],
)
def example_dir(request):
return request.param
@pytest.fixture(
params=[example_file.parent for example_file in INVALID_EXAMPLE_FILES],
ids=[example_file.parent.stem for example_file in INVALID_EXAMPLE_FILES],
)
def invalid_example_dir(request):
return request.param
def test_examples(example_dir):
expected_dir = example_dir.joinpath("output", "expected")
actual_dir = example_dir.joinpath("output", "actual")
dep_file_path = example_dir.joinpath("dependencies.yaml")
# Copy pyproject.toml files from expected to actual since they are modified in place
for dirpath, _, filenames in os.walk(expected_dir):
for filename in filenames:
if filename == "pyproject.toml":
full_path = pathlib.Path(dirpath) / filename
relative_path = full_path.relative_to(expected_dir)
new_path = actual_dir / relative_path
new_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(full_path, new_path)
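    # Generate files for the example's dependencies.yaml; --clean first removes
    # any files previously generated in output/actual.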
main(
[
"--config",
str(dep_file_path),
"--clean",
str(example_dir.joinpath("output", "actual")),
]
)
expected_file_set = make_file_set(expected_dir)
actual_file_set = make_file_set(actual_dir)
assert expected_file_set == actual_file_set
    for file in actual_file_set:
        actual_contents = actual_dir.joinpath(file).read_text()
        expected_contents = expected_dir.joinpath(file).read_text()
        assert actual_contents == expected_contents
@pytest.mark.parametrize("test_name", _erroneous_examples)
def test_error_examples(test_name):
test_dir = CURRENT_DIR.joinpath("examples", test_name)
dep_file_path = test_dir.joinpath("dependencies.yaml")
with pytest.raises(ValueError):
main(
[
"--config",
str(dep_file_path),
"--clean",
str(test_dir.joinpath("output", "actual")),
]
)
def test_examples_are_valid(schema, example_dir):
dep_file_path = example_dir / "dependencies.yaml"
instance = yaml.load(dep_file_path.read_text(), Loader=yaml.SafeLoader)
jsonschema.validate(instance, schema=schema)
def test_invalid_examples_are_invalid(schema, invalid_example_dir):
dep_file_path = invalid_example_dir / "dependencies.yaml"
instance = yaml.load(dep_file_path.read_text(), Loader=yaml.SafeLoader)
with pytest.raises(ValidationError):
jsonschema.validate(instance, schema=schema)
| 0 |
rapidsai_public_repos/dependency-file-generator | rapidsai_public_repos/dependency-file-generator/tests/conftest.py | import pytest
from rapids_dependency_file_generator.rapids_dependency_file_validator import SCHEMA
@pytest.fixture(scope="session")
def schema():
return SCHEMA
| 0 |
rapidsai_public_repos/dependency-file-generator | rapidsai_public_repos/dependency-file-generator/tests/test_schema.py | import jsonschema
def test_schema_is_valid(schema):
jsonschema.Draft7Validator.check_schema(schema)
| 0 |
rapidsai_public_repos/dependency-file-generator | rapidsai_public_repos/dependency-file-generator/tests/test_cli.py | import pytest
from rapids_dependency_file_generator.cli import generate_matrix, validate_args
def test_generate_matrix():
matrix = generate_matrix("cuda=11.5;arch=x86_64")
assert matrix == {"cuda": ["11.5"], "arch": ["x86_64"]}
matrix = generate_matrix(None)
assert matrix == {}
def test_validate_args():
# Missing output
with pytest.raises(Exception):
validate_args(["--matrix", "cuda=11.5;arch=x86_64", "--file_key", "all"])
# Missing matrix
with pytest.raises(Exception):
validate_args(["--output", "conda", "--file_key", "all"])
# Missing file_key
with pytest.raises(Exception):
validate_args(["--output", "conda", "--matrix", "cuda=11.5;arch=x86_64"])
# Valid
validate_args(
[
"--output",
"conda",
"--matrix",
"cuda=11.5;arch=x86_64",
"--file_key",
"all",
]
)
| 0 |
rapidsai_public_repos/dependency-file-generator | rapidsai_public_repos/dependency-file-generator/tests/test_rapids_dependency_file_generator.py | from unittest import mock
import pytest
import yaml
from rapids_dependency_file_generator.constants import OutputTypes, cli_name
from rapids_dependency_file_generator.rapids_dependency_file_generator import (
dedupe,
get_requested_output_types,
make_dependency_file,
should_use_specific_entry,
)
def test_dedupe():
# simple list
deduped = dedupe(["dep1", "dep1", "dep2"])
assert deduped == ["dep1", "dep2"]
# list w/ pip dependencies
deduped = dedupe(
[
"dep1",
"dep1",
{"pip": ["pip_dep1", "pip_dep2"]},
{"pip": ["pip_dep1", "pip_dep2"]},
]
)
assert deduped == ["dep1", {"pip": ["pip_dep1", "pip_dep2"]}]
@mock.patch(
"rapids_dependency_file_generator.rapids_dependency_file_generator.os.path.relpath"
)
def test_make_dependency_file(mock_relpath):
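    # Mock os.path.relpath so the header's config-file path is deterministic
    # regardless of where the tests run from.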
relpath = "../../config_file.yaml"
mock_relpath.return_value = relpath
header = f"""\
# This file is generated by `{cli_name}`.
# To make changes, edit {relpath} and run `{cli_name}`.
"""
env = make_dependency_file(
"conda",
"tmp_env.yaml",
"config_file",
"output_path",
["rapidsai", "nvidia"],
["dep1", "dep2"],
)
assert env == header + yaml.dump(
{
"name": "tmp_env",
"channels": ["rapidsai", "nvidia"],
"dependencies": ["dep1", "dep2"],
}
)
env = make_dependency_file(
"requirements",
"tmp_env.txt",
"config_file",
"output_path",
["rapidsai", "nvidia"],
["dep1", "dep2"],
)
assert env == header + "dep1\ndep2\n"
def test_should_use_specific_entry():
# no match
matrix_combo = {"cuda": "11.5", "arch": "x86_64"}
specific_entry = {"cuda": "11.6"}
result = should_use_specific_entry(matrix_combo, specific_entry)
assert result is False
# one match
matrix_combo = {"cuda": "11.5", "arch": "x86_64"}
specific_entry = {"cuda": "11.5"}
result = should_use_specific_entry(matrix_combo, specific_entry)
assert result is True
# many matches
matrix_combo = {"cuda": "11.5", "arch": "x86_64", "python": "3.6"}
specific_entry = {"cuda": "11.5", "arch": "x86_64"}
result = should_use_specific_entry(matrix_combo, specific_entry)
assert result is True
def test_get_requested_output_types():
result = get_requested_output_types(str(OutputTypes.NONE))
assert result == []
result = get_requested_output_types([str(OutputTypes.NONE)])
assert result == []
result = get_requested_output_types(str(OutputTypes.CONDA))
assert result == [str(OutputTypes.CONDA)]
result = get_requested_output_types([str(OutputTypes.CONDA)])
assert result == [str(OutputTypes.CONDA)]
result = get_requested_output_types(str(OutputTypes.REQUIREMENTS))
assert result == [str(OutputTypes.REQUIREMENTS)]
result = get_requested_output_types([str(OutputTypes.REQUIREMENTS)])
assert result == [str(OutputTypes.REQUIREMENTS)]
result = get_requested_output_types(
[str(OutputTypes.REQUIREMENTS), str(OutputTypes.CONDA)]
)
assert result == [str(OutputTypes.REQUIREMENTS), str(OutputTypes.CONDA)]
with pytest.raises(ValueError):
get_requested_output_types("invalid_value")
with pytest.raises(ValueError):
get_requested_output_types(["invalid_value"])
with pytest.raises(ValueError):
get_requested_output_types([str(OutputTypes.NONE), str(OutputTypes.CONDA)])
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/requirements-minimal/dependencies.yaml | files:
all:
output: requirements
requirements_dir: output/actual
matrix:
cuda: ["11.5", "11.6"]
arch: [x86_64]
includes:
- build
py_build:
output: pyproject
pyproject_dir: output/actual
includes:
- python_build_dependencies
extras:
table: build-system
py_run:
output: pyproject
pyproject_dir: output/actual
includes:
- python_run_dependencies
extras:
table: project
py_optional_test:
output: pyproject
pyproject_dir: output/actual
includes:
- python_test_dependencies
extras:
table: project.optional-dependencies
key: test
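      # Emitted into the [project.optional-dependencies] table under the "test" key.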
channels:
- rapidsai
- conda-forge
dependencies:
build:
common:
- output_types: [conda, requirements]
packages:
- clang=11.1.0
- output_types: requirements
packages:
- spdlog>=1.8.5,<1.9
specific:
- output_types: [conda, requirements]
matrices:
- matrix:
cuda: "11.5"
packages:
- cuda-python>=11.5,<11.7.1
- matrix:
cuda: "11.6"
packages:
- cuda-python>=11.6,<11.7.1
- output_types: requirements
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
python_build_dependencies:
common:
- output_types: pyproject
packages:
- setuptools
python_run_dependencies:
common:
- output_types: pyproject
packages:
- numpy
- scipy
python_test_dependencies:
common:
- output_types: pyproject
packages:
- scikit-image
- scikit-learn
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/requirements-minimal/output | rapidsai_public_repos/dependency-file-generator/tests/examples/requirements-minimal/output/expected/requirements_all_cuda-116_arch-x86_64.txt | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
clang=11.1.0
cuda-python>=11.6,<11.7.1
cudatoolkit=11.6
spdlog>=1.8.5,<1.9
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/requirements-minimal/output | rapidsai_public_repos/dependency-file-generator/tests/examples/requirements-minimal/output/expected/pyproject.toml | [build-system]
build-backend = "setuptools.build_meta"
requires = [
"setuptools",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
[project]
name = "test"
version = "0.0.0"
dependencies = [
"numpy",
"scipy",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
[project.optional-dependencies]
test = [
"scikit-image",
"scikit-learn",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/requirements-minimal/output | rapidsai_public_repos/dependency-file-generator/tests/examples/requirements-minimal/output/expected/requirements_all_cuda-115_arch-x86_64.txt | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
clang=11.1.0
cuda-python>=11.5,<11.7.1
cudatoolkit=11.5
spdlog>=1.8.5,<1.9
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/no-specific-match/dependencies.yaml | files:
all:
output: conda
    conda_dir: output/actual
matrix:
cuda: ["11.8"]
includes:
- cudatoolkit
channels:
- rapidsai
- conda-forge
dependencies:
cudatoolkit:
specific:
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback/dependencies.yaml | files:
all:
output: conda
conda_dir: output/actual
matrix:
cuda: ["11.5", "11.8"]
includes:
- cudatoolkit
channels:
- rapidsai
- conda-forge
dependencies:
cudatoolkit:
specific:
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
- matrix:
packages:
- cudatoolkit
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback/output | rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback/output/expected/all_cuda-118.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- cudatoolkit
name: all_cuda-118
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback/output | rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback/output/expected/all_cuda-115.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- cudatoolkit=11.5
name: all_cuda-115
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/conda-minimal/dependencies.yaml | files:
build:
output: conda
conda_dir: output/actual
matrix:
cuda: ["11.5", "11.6"]
arch: [x86_64]
includes:
- build
channels:
- rapidsai
- conda-forge
dependencies:
build:
common:
- output_types: [conda, requirements]
packages:
- clang=11.1.0
- spdlog>=1.8.5,<1.9
- output_types: conda
packages:
- pip
- pip:
- git+https://github.com/python-streamz/streamz.git@master
specific:
- output_types: [conda, requirements]
matrices:
- matrix:
cuda: "11.5"
packages:
- cuda-python>=11.5,<11.7.1
- matrix:
cuda: "11.6"
packages:
- cuda-python>=11.6,<11.7.1
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/conda-minimal/output | rapidsai_public_repos/dependency-file-generator/tests/examples/conda-minimal/output/expected/build_cuda-115_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang=11.1.0
- cuda-python>=11.5,<11.7.1
- cudatoolkit=11.5
- pip
- spdlog>=1.8.5,<1.9
- pip:
- git+https://github.com/python-streamz/streamz.git@master
name: build_cuda-115_arch-x86_64
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/conda-minimal/output | rapidsai_public_repos/dependency-file-generator/tests/examples/conda-minimal/output/expected/build_cuda-116_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang=11.1.0
- cuda-python>=11.6,<11.7.1
- cudatoolkit=11.6
- pip
- spdlog>=1.8.5,<1.9
- pip:
- git+https://github.com/python-streamz/streamz.git@master
name: build_cuda-116_arch-x86_64
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/no-matrix/dependencies.yaml | files:
checks:
output: conda
conda_dir: output/actual
includes:
- checks
channels:
- rapidsai
- conda-forge
dependencies:
checks:
common:
- output_types: [conda, requirements]
packages:
- clang=11.1.0
- spdlog>=1.8.5,<1.9
- output_types: conda
packages:
- pip
- pip:
- git+https://github.com/python-streamz/streamz.git@master
specific:
- output_types: [conda, requirements]
matrices:
- matrix:
cuda: "11.5"
packages:
- cuda-python>=11.5,<11.7.1
- matrix:
cuda: "11.6"
packages:
- cuda-python>=11.6,<11.7.1
- matrix:
packages:
- default-cuda-python
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
- matrix:
packages:
- default-cudatoolkit
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/no-matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/no-matrix/output/expected/checks.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang=11.1.0
- default-cuda-python
- default-cudatoolkit
- pip
- spdlog>=1.8.5,<1.9
- pip:
- git+https://github.com/python-streamz/streamz.git@master
name: checks
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/invalid | rapidsai_public_repos/dependency-file-generator/tests/examples/invalid/invalid-requirement/dependencies.yaml | files:
build:
output: conda
conda_dir: output/actual
matrix:
cuda: ["11.5", "11.6"]
arch: [x86_64]
includes:
- build
channels:
- rapidsai
- conda-forge
dependencies:
build:
common:
- output_types: [conda, requirements]
packages:
- clang=11.1.0
- spdlog>=1.8.5,<1.9
- output_types: conda
packages:
- pip
- pip:
- git+https://github.com/python-streamz/streamz.git@master
specific:
- output_types: [conda, requirements]
matrices:
- matrix:
cuda: "11.5"
packages:
- 1234
- cuda-python>=11.5,<11.7.1
- matrix:
cuda: "11.6"
packages:
- cuda-python>=11.6,<11.7.1
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/invalid | rapidsai_public_repos/dependency-file-generator/tests/examples/invalid/pyproject_bad_key/dependencies.yaml | files:
py_build:
output: pyproject
pyproject_dir: output/actual
matrix:
cuda: ["11.5", "11.6"]
includes:
- build
extras:
table: build-system
key: dependencies
channels:
- rapidsai
- conda-forge
dependencies:
build:
specific:
- output_types: [conda, requirements]
matrices:
- matrix:
cuda: "11.5"
packages:
- cuda-python>=11.5,<11.7.1
- matrix:
cuda: "11.6"
packages:
- cuda-python>=11.6,<11.7.1
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback-first/dependencies.yaml | files:
all:
output: conda
conda_dir: output/actual
matrix:
cuda: ["11.5", "11.8"]
includes:
- cudatoolkit
channels:
- rapidsai
- conda-forge
dependencies:
cudatoolkit:
specific:
- output_types: conda
matrices:
- matrix:
packages:
- cudatoolkit
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback-first/output | rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback-first/output/expected/all_cuda-118.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- cudatoolkit
name: all_cuda-118
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback-first/output | rapidsai_public_repos/dependency-file-generator/tests/examples/specific-fallback-first/output/expected/all_cuda-115.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- cudatoolkit=11.5
name: all_cuda-115
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/pyproject_matrix/dependencies.yaml | files:
py_build:
output: pyproject
pyproject_dir: output/actual
matrix:
cuda: ["11.5", "11.6"]
includes:
- build
extras:
table: build-system
channels:
- rapidsai
- conda-forge
dependencies:
build:
specific:
- output_types: [conda, requirements]
matrices:
- matrix:
cuda: "11.5"
packages:
- cuda-python>=11.5,<11.7.1
- matrix:
cuda: "11.6"
packages:
- cuda-python>=11.6,<11.7.1
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/dependencies.yaml | files:
dev:
output: conda
conda_dir: output/actual
matrix:
cuda: ["11.5", "11.6"]
arch: [x86_64, arm64]
py: ["3.8", "3.9"]
includes:
- build
channels:
- rapidsai
- conda-forge
dependencies:
build:
common:
- output_types: [conda]
packages:
- clang-tools=11.1.0
- spdlog>=1.8.5,<1.9
specific:
- output_types: [conda]
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
- output_types: [conda]
matrices:
- matrix:
arch: x86_64
py: "3.9"
packages:
- some_amd64_39_build_dep
- matrix:
packages:
- output_types: [conda]
matrices:
- matrix:
arch: arm64
cuda: "11.5"
py: "3.8"
packages:
- super_specific_dep
- matrix:
packages:
- output_types: [conda]
matrices:
- matrix:
cuda: "11.5"
py: "3.8"
packages:
- some_115_38_build_dep
- matrix:
packages:
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output/expected/dev_cuda-116_arch-x86_64_py-39.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang-tools=11.1.0
- cudatoolkit=11.6
- some_amd64_39_build_dep
- spdlog>=1.8.5,<1.9
name: dev_cuda-116_arch-x86_64_py-39
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output/expected/dev_cuda-116_arch-arm64_py-38.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang-tools=11.1.0
- cudatoolkit=11.6
- spdlog>=1.8.5,<1.9
name: dev_cuda-116_arch-arm64_py-38
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output/expected/dev_cuda-115_arch-arm64_py-39.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang-tools=11.1.0
- cudatoolkit=11.5
- spdlog>=1.8.5,<1.9
name: dev_cuda-115_arch-arm64_py-39
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output/expected/dev_cuda-116_arch-x86_64_py-38.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang-tools=11.1.0
- cudatoolkit=11.6
- spdlog>=1.8.5,<1.9
name: dev_cuda-116_arch-x86_64_py-38
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output/expected/dev_cuda-116_arch-arm64_py-39.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang-tools=11.1.0
- cudatoolkit=11.6
- spdlog>=1.8.5,<1.9
name: dev_cuda-116_arch-arm64_py-39
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output/expected/dev_cuda-115_arch-x86_64_py-39.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang-tools=11.1.0
- cudatoolkit=11.5
- some_amd64_39_build_dep
- spdlog>=1.8.5,<1.9
name: dev_cuda-115_arch-x86_64_py-39
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output/expected/dev_cuda-115_arch-x86_64_py-38.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang-tools=11.1.0
- cudatoolkit=11.5
- some_115_38_build_dep
- spdlog>=1.8.5,<1.9
name: dev_cuda-115_arch-x86_64_py-38
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output | rapidsai_public_repos/dependency-file-generator/tests/examples/matrix/output/expected/dev_cuda-115_arch-arm64_py-38.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- clang-tools=11.1.0
- cudatoolkit=11.5
- some_115_38_build_dep
- spdlog>=1.8.5,<1.9
- super_specific_dep
name: dev_cuda-115_arch-arm64_py-38
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples | rapidsai_public_repos/dependency-file-generator/tests/examples/integration/dependencies.yaml | files:
all:
output: [conda, requirements]
requirements_dir: output/actual
conda_dir: output/actual
matrix:
cuda: ["11.5", "11.6"]
includes:
- build
- test
test:
output: none
includes:
- test
channels:
- rapidsai
- conda-forge
dependencies:
build:
common:
- output_types: [conda, requirements]
packages:
- black=22.3.0
- clang=11.1.0
- output_types: conda
packages:
- clang-tools=11.1.0
- spdlog>=1.8.5,<1.9
- output_types: requirements
packages:
- some_common_req_misc_dep
specific:
- output_types: [conda, requirements]
matrices:
- matrix:
cuda: "11.5"
packages:
- cuda-python>=11.5,<11.7.1
- matrix:
cuda: "11.6"
packages:
- cuda-python>=11.6,<11.7.1
- output_types: conda
matrices:
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- matrix:
cuda: "11.6"
packages:
- cudatoolkit=11.6
test:
common:
- output_types: [conda, requirements]
packages:
- pytest
- pytest-cov
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/integration/output | rapidsai_public_repos/dependency-file-generator/tests/examples/integration/output/expected/all_cuda-116.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- black=22.3.0
- clang-tools=11.1.0
- clang=11.1.0
- cuda-python>=11.6,<11.7.1
- cudatoolkit=11.6
- pytest
- pytest-cov
- spdlog>=1.8.5,<1.9
name: all_cuda-116
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/integration/output | rapidsai_public_repos/dependency-file-generator/tests/examples/integration/output/expected/requirements_all_cuda-116.txt | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
black=22.3.0
clang=11.1.0
cuda-python>=11.6,<11.7.1
pytest
pytest-cov
some_common_req_misc_dep
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/integration/output | rapidsai_public_repos/dependency-file-generator/tests/examples/integration/output/expected/requirements_all_cuda-115.txt | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
black=22.3.0
clang=11.1.0
cuda-python>=11.5,<11.7.1
pytest
pytest-cov
some_common_req_misc_dep
| 0 |
rapidsai_public_repos/dependency-file-generator/tests/examples/integration/output | rapidsai_public_repos/dependency-file-generator/tests/examples/integration/output/expected/all_cuda-115.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- black=22.3.0
- clang-tools=11.1.0
- clang=11.1.0
- cuda-python>=11.5,<11.7.1
- cudatoolkit=11.5
- pytest
- pytest-cov
- spdlog>=1.8.5,<1.9
name: all_cuda-115
| 0 |
rapidsai_public_repos/dependency-file-generator | rapidsai_public_repos/dependency-file-generator/ci/update-versions.sh | #!/bin/bash
# Updates the version string in `_version.py`
# Per https://github.com/semantic-release/exec:
# - stderr is used for logging
# - stdout is used for printing why verification failed
set -ue
NEXT_VERSION=$1
{
sed -i "/__version__/ s/\".*\"/\"${NEXT_VERSION}\"/" src/rapids_dependency_file_generator/_version.py
sed -i "/\$id/ s|/v[^/]*/|/v${NEXT_VERSION}/|" src/rapids_dependency_file_generator/schema.json
sed -i "/\"version\":/ s|: \".*\"|: \"${NEXT_VERSION}\"|" package.json
cat \
src/rapids_dependency_file_generator/_version.py \
src/rapids_dependency_file_generator/schema.json \
package.json
} 1>&2
| 0 |
rapidsai_public_repos/dependency-file-generator | rapidsai_public_repos/dependency-file-generator/ci/build-test.sh | #!/bin/bash
# Builds and tests Python package
# Per https://github.com/semantic-release/exec:
# - stderr is used for logging
# - stdout is used for printing why verification failed
set -ue
{
pip install build pytest
python -m build .
for PKG in dist/*; do
echo "$PKG"
pip uninstall -y rapids-dependency-file-generator
pip install "$PKG"
pytest
rapids-dependency-file-generator -h # test CLI output
done
} 1>&2
| 0 |
rapidsai_public_repos/dependency-file-generator | rapidsai_public_repos/dependency-file-generator/ci/pypi-publish.sh | #!/bin/bash
# Uploads packages to PyPI
# Per https://github.com/semantic-release/exec:
# - stderr is used for logging
# - stdout is used for returning release information
set -ue
{
pip install twine
twine upload \
--username __token__ \
--password "${PYPI_TOKEN}" \
--disable-progress-bar \
dist/*
} 1>&2
jq -ncr '{name: "PyPI release", url: "https://pypi.org/project/rapids-dependency-file-generator/"}'
| 0 |
rapidsai_public_repos/dependency-file-generator/src | rapidsai_public_repos/dependency-file-generator/src/rapids_dependency_file_generator/schema.json | {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "https://raw.githubusercontent.com/rapidsai/dependency-file-generator/v1.7.1/src/rapids_dependency_file_generator/schema.json",
"type": "object",
"title": "RAPIDS Package Dependency Specification Format",
"description": "Consolidated specification of RAPIDS project dependencies",
"properties": {
"files": {
"type": "object",
"patternProperties": {
".*": {
"type": "object",
"properties": {
"output": {"$ref": "#/$defs/outputs"},
"extras": {"$ref": "#/$defs/extras"},
"includes": {"type": "array", "items": {"type": "string"}},
"matrix": {"$ref": "#/$defs/matrix"},
"requirements_dir": {"type": "string"},
"conda_dir": {"type": "string"},
"pyproject_dir": {"type": "string"}
},
"additionalProperties": false,
"required": ["output", "includes"]
}
},
"minProperties": 1
},
"dependencies": {
"type": "object",
"patternProperties": {
".*": {
"type": "object",
"properties": {
"common": {
"type": "array",
"items": {
"type": "object",
"properties": {
"output_types": {"$ref": "#/$defs/outputs"},
"packages": {"$ref": "#/$defs/packages"}
},
"required": ["output_types", "packages"],
"additionalProperties": false
}
},
"specific": {
"type": "array",
"items": {
"type": "object",
"properties": {
"output_types": {"$ref": "#/$defs/outputs"},
"matrices": {"$ref": "#/$defs/matrices"}
},
"required": ["output_types", "matrices"],
"additionalProperties": false
}
}
},
"minProperties": 1,
"additionalProperties": false
}
}
},
"channels": {"$ref": "#/$defs/channels"}
},
"required": ["files", "dependencies"],
"additionalProperties": false,
"$defs": {
"channel": {
"type": "string",
"format": "iri-reference"
},
"channel-list": {
"type": "array",
"items": {
"$ref": "#/$defs/channel"
}
},
"channels": {
"$oneOf": [
{"$ref": "#/$defs/channel"},
{"$ref": "#/$defs/channel-list"}
]
},
"matrix": {
"type": "object",
"patternProperties": {
".*": {
"type": "array",
"items": {"type": "string"}
}
}
},
"matrix-matcher": {
"type": "object",
"properties": {
"matrix": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".*": {"type": "string"}
}
},
{"type": "null"}
]
},
"packages": {"oneOf": [
{"$ref": "#/$defs/requirements"},
{"type": "null"}
]}
},
"requiredProperties": ["matrix", "packages"],
"additionalProperties": false
},
"matrices": {
"type": "array",
"items": {"$ref": "#/$defs/matrix-matcher"}
},
"output-types": {
"enum": ["conda", "requirements", "pyproject"]
},
"output-types-array": {
"type": "array",
"item": {"$ref": "#/$defs/output-types"}
},
"outputs": {
"oneOf": [
{"$ref": "#/$defs/output-types"},
{"$ref": "#/$defs/output-types-array"},
{"const": "none"}
]
},
"packages": {
"type": "array",
"items": {
"oneOf": [
{"$ref": "#/$defs/requirement"},
{"$ref": "#/$defs/pip-requirements"}
]
}
},
"requirement": {
"type": "string"
},
"requirements": {
"type": "array",
"items": {
"$ref": "#/$defs/requirement"
},
"minItems": 1
},
"pip-requirements": {
"type": "object",
"properties": {
"pip": {"$ref": "#/$defs/requirements"}
},
"additionalProperties": false,
"required": ["pip"]
},
"extras": {
"type": "object",
"properties": {
"table": {
"type": "string",
"enum": ["build-system", "project", "project.optional-dependencies"]
},
"key": {"type": "string"}
},
"if": {
"properties": { "table": { "const": "project.optional-dependencies" } }
},
"then": {
"required": ["key"]
},
"else": {
"not": {
"required": ["key"]
}
},
"additionalProperties": false
}
}
}
| 0 |
rapidsai_public_repos/dependency-file-generator/src | rapidsai_public_repos/dependency-file-generator/src/rapids_dependency_file_generator/cli.py | import argparse
import os
import yaml
from ._version import __version__ as version
from .constants import OutputTypes, default_dependency_file_path
from .rapids_dependency_file_generator import (
delete_existing_files,
make_dependency_files,
)
from .rapids_dependency_file_validator import validate_dependencies
def validate_args(argv):
parser = argparse.ArgumentParser(
description=f"Generates dependency files for RAPIDS libraries (version: {version})"
)
parser.add_argument(
"--config",
default=default_dependency_file_path,
help="Path to YAML config file",
)
parser.add_argument(
"--clean",
nargs="?",
default=None,
const="",
help=(
"Delete any files previously created by dfg before running. An optional "
"path to clean may be provided, otherwise the current working directory "
"is used as the root from which to clean."
),
)
codependent_args = parser.add_argument_group("optional, but codependent")
codependent_args.add_argument(
"--file_key",
help="The file key from `dependencies.yaml` to generate",
)
codependent_args.add_argument(
"--output",
help="The output file type to generate",
choices=[str(x) for x in [OutputTypes.CONDA, OutputTypes.REQUIREMENTS]],
)
codependent_args.add_argument(
"--matrix",
help=(
"String representing which matrix combination should be generated, "
'such as `--matrix "cuda=11.5;arch=x86_64"`. May also be an empty string'
),
)
args = parser.parse_args(argv)
dependent_arg_keys = ["file_key", "output", "matrix"]
dependent_arg_values = [getattr(args, key) is None for key in dependent_arg_keys]
if any(dependent_arg_values) and not all(dependent_arg_values):
raise ValueError(
"The following arguments must be used together:"
+ "".join([f"\n --{x}" for x in dependent_arg_keys])
)
# If --clean was passed without arguments, default to cleaning from the root of the
# tree where the config file is.
if args.clean == "":
args.clean = os.path.dirname(os.path.abspath(args.config))
return args
def generate_matrix(matrix_arg):
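    """Convert a CLI ``--matrix`` string into the internal matrix dict.

    Illustrative example: ``generate_matrix("cuda=11.5;arch=x86_64")``
    returns ``{"cuda": ["11.5"], "arch": ["x86_64"]}``, the same shape as a
    ``matrix`` entry in dependencies.yaml.
    """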
if not matrix_arg:
return {}
matrix = {}
for matrix_column in matrix_arg.split(";"):
key, val = matrix_column.split("=")
matrix[key] = [val]
return matrix
def main(argv=None):
args = validate_args(argv)
with open(args.config) as f:
parsed_config = yaml.load(f, Loader=yaml.FullLoader)
validate_dependencies(parsed_config)
matrix = generate_matrix(args.matrix)
to_stdout = all([args.file_key, args.output, args.matrix is not None])
if to_stdout:
includes = parsed_config["files"][args.file_key]["includes"]
parsed_config["files"] = {
args.file_key: {
"matrix": matrix,
"output": args.output,
"includes": includes,
}
}
if args.clean:
delete_existing_files(args.clean)
make_dependency_files(parsed_config, args.config, to_stdout)
| 0 |
rapidsai_public_repos/dependency-file-generator/src | rapidsai_public_repos/dependency-file-generator/src/rapids_dependency_file_generator/rapids_dependency_file_generator.py | import itertools
import os
import textwrap
from collections import defaultdict
import tomlkit
import yaml
from .constants import (
OutputTypes,
cli_name,
default_channels,
default_conda_dir,
default_pyproject_dir,
default_requirements_dir,
)
OUTPUT_ENUM_VALUES = [str(x) for x in OutputTypes]
NON_NONE_OUTPUT_ENUM_VALUES = [str(x) for x in OutputTypes if x != OutputTypes.NONE]
HEADER = f"# This file is generated by `{cli_name}`."
def delete_existing_files(root="."):
"""Delete any files generated by this generator.
This function can be used to clean up a directory tree before generating a new set
of files from scratch.
Parameters
----------
root : str
The path to the root of the directory tree to search for files to delete.
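
    Examples
    --------
    A sketch (skipped in doctests because it touches the filesystem): remove
    all previously generated files under a hypothetical output tree.

    >>> delete_existing_files("output/actual")  # doctest: +SKIP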
"""
for dirpath, _, filenames in os.walk(root):
for fn in filter(
lambda fn: fn.endswith(".txt") or fn.endswith(".yaml"), filenames
):
with open(file_path := os.path.join(dirpath, fn)) as f:
if HEADER in f.read():
os.remove(file_path)
def dedupe(dependencies):
"""Generate the unique set of dependencies contained in a dependency list.
Parameters
----------
dependencies : Sequence
A sequence containing dependencies (possibly including duplicates).
    Returns
    -------
list
The `dependencies` with all duplicates removed.
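
    Examples
    --------
    Plain strings are deduplicated and sorted; dict entries (e.g. pip
    requirement lists) are merged key-wise:

    >>> dedupe(["b", "a", "b", {"pip": ["x"]}, {"pip": ["y", "x"]}])
    ['a', 'b', {'pip': ['x', 'y']}]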
"""
deduped = sorted({dep for dep in dependencies if not isinstance(dep, dict)})
dict_deps = defaultdict(list)
for dep in filter(lambda dep: isinstance(dep, dict), dependencies):
for key, values in dep.items():
dict_deps[key].extend(values)
dict_deps[key] = sorted(set(dict_deps[key]))
if dict_deps:
deduped.append(dict(dict_deps))
return deduped
def grid(gridspec):
"""Yields the Cartesian product of a `dict` of iterables.
The input ``gridspec`` is a dictionary whose keys correspond to
parameter names. Each key is associated with an iterable of the
values that parameter could take on. The result is a sequence of
dictionaries where each dictionary has one of the unique combinations
of the parameter values.
Parameters
----------
gridspec : dict
A mapping from parameter names to lists of parameter values.
Yields
------
    dict
Each yielded value is a dictionary containing one of the unique
combinations of parameter values from `gridspec`.
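
    Examples
    --------
    >>> list(grid({"cuda": ["11.5", "11.6"], "arch": ["x86_64"]}))
    [{'cuda': '11.5', 'arch': 'x86_64'}, {'cuda': '11.6', 'arch': 'x86_64'}]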
"""
for values in itertools.product(*gridspec.values()):
yield dict(zip(gridspec.keys(), values))
def make_dependency_file(
file_type, name, config_file, output_dir, conda_channels, dependencies, extras=None
):
"""Generate the contents of the dependency file.
Parameters
----------
file_type : str
A string corresponding to the value of a member of constants.OutputTypes.
name : str
The name of the file to write.
config_file : str
The full path to the dependencies.yaml file.
output_dir : str
The path to the directory where the dependency files will be written.
    conda_channels : list
The channels to include in the file. Only used when `file_type` is
CONDA.
dependencies : list
The dependencies to include in the file.
extras : dict
Any extra information provided for generating this dependency file.
Returns
-------
str
The contents of the file.
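
    Examples
    --------
    An illustrative call for the ``requirements`` output type, which builds
    the contents as a plain string (the file name and config path here are
    hypothetical; the ``pyproject`` path instead reads and updates an
    existing pyproject.toml on disk):

    >>> print(make_dependency_file(
    ...     "requirements", "requirements_all.txt", "dependencies.yaml", ".",
    ...     None, ["numpy", "scipy"]))
    # This file is generated by `rapids-dependency-file-generator`.
    # To make changes, edit dependencies.yaml and run `rapids-dependency-file-generator`.
    numpy
    scipy
    <BLANKLINE>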
"""
relative_path_to_config_file = os.path.relpath(config_file, output_dir)
file_contents = textwrap.dedent(
f"""\
{HEADER}
# To make changes, edit {relative_path_to_config_file} and run `{cli_name}`.
"""
)
if file_type == str(OutputTypes.CONDA):
file_contents += yaml.dump(
{
"name": os.path.splitext(name)[0],
"channels": conda_channels,
"dependencies": dependencies,
}
)
elif file_type == str(OutputTypes.REQUIREMENTS):
file_contents += "\n".join(dependencies) + "\n"
elif file_type == str(OutputTypes.PYPROJECT):
# This file type needs to be modified in place instead of built from scratch.
with open(os.path.join(output_dir, name)) as f:
file_contents = tomlkit.load(f)
toml_deps = tomlkit.array()
for dep in dependencies:
toml_deps.add_line(dep)
toml_deps.add_line(indent="")
toml_deps.comment(
f"This list was generated by `{cli_name}`. To make changes, edit "
f"{relative_path_to_config_file} and run `{cli_name}`."
)
        # Walk down into subtables like "[x.y.z]", creating tables as needed.
table = file_contents
for section in extras["table"].split("."):
try:
table = table[section]
except tomlkit.exceptions.NonExistentKey:
                # If the table is not a super-table (i.e. if it has its own
                # contents and is not simply part of a nested table name like
                # 'x.y.z'), add a newline before adding the new sub-table.
if not table.is_super_table():
table.add(tomlkit.nl())
table[section] = tomlkit.table()
table = table[section]
key = extras.get(
"key", "requires" if extras["table"] == "build-system" else "dependencies"
)
table[key] = toml_deps
return file_contents
def get_requested_output_types(output):
"""Get the list of output file types to generate.
The possible options are enumerated by `constants.OutputTypes`. If the only
requested output is `NONE`, returns an empty list.
Parameters
----------
output : str or List[str]
A string or list of strings indicating the output types.
Returns
-------
List[str]
The list of output file types to generate.
Raises
    ------
ValueError
If multiple outputs are requested and one of them is NONE, or if an
unknown output type is requested.
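
    Examples
    --------
    A scalar value is normalized to a list, and a ``none``-only output
    produces an empty list:

    >>> get_requested_output_types("conda")
    ['conda']
    >>> get_requested_output_types(["none"])
    []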
"""
output = output if isinstance(output, list) else [output]
if output == [str(OutputTypes.NONE)]:
return []
if len(output) > 1 and str(OutputTypes.NONE) in output:
raise ValueError("'output: [none]' cannot be combined with any other values.")
if any(v not in NON_NONE_OUTPUT_ENUM_VALUES for v in output):
raise ValueError(
"'output' key can only be "
+ ", ".join(f"'{x}'" for x in OUTPUT_ENUM_VALUES)
+ f" or a list of the non-'{OutputTypes.NONE}' values."
)
return output
def get_filename(file_type, file_key, matrix_combo):
"""Get the name of the file to which to write a generated dependency set.
The file name will be composed of the following components, each determined
by the `file_type`:
- A file-type-based prefix e.g. "requirements" for requirements.txt files.
- A name determined by the value of $FILENAME in the corresponding
[files.$FILENAME] section of dependencies.yaml. This name is used for some
output types (conda, requirements) and not others (pyproject).
- A matrix description encoding the key-value pairs in `matrix_combo`.
- A suitable extension for the file (e.g. ".yaml" for conda environment files.)
Parameters
----------
file_type : str
A string corresponding to the value of a member of constants.OutputTypes.
    file_key : str
        The name of this member in the [files] section of dependencies.yaml.
matrix_combo : dict
A mapping of key-value pairs corresponding to the
[files.$FILENAME.matrix] entry in dependencies.yaml.
Returns
-------
str
The name of the file to generate.
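
    Examples
    --------
    Dots in matrix values are stripped from the generated name:

    >>> get_filename("conda", "all", {"cuda": "11.5", "arch": "x86_64"})
    'all_cuda-115_arch-x86_64.yaml'
    >>> get_filename("requirements", "all", {"cuda": "11.6"})
    'requirements_all_cuda-116.txt'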
"""
file_type_prefix = ""
file_ext = ""
file_name_prefix = file_key
if file_type == str(OutputTypes.CONDA):
file_ext = ".yaml"
elif file_type == str(OutputTypes.REQUIREMENTS):
file_ext = ".txt"
file_type_prefix = "requirements"
elif file_type == str(OutputTypes.PYPROJECT):
file_ext = ".toml"
# Unlike for files like requirements.txt or conda environment YAML files, which
# may be named with additional prefixes (e.g. all_cuda_*) pyproject.toml files
# need to have that exact name and are never prefixed.
file_name_prefix = "pyproject"
suffix = "_".join([f"{k}-{v}" for k, v in matrix_combo.items()])
filename = "_".join(
filter(None, (file_type_prefix, file_name_prefix, suffix))
).replace(".", "")
return filename + file_ext
def get_output_dir(file_type, config_file_path, file_config):
"""Get the directory to which to write a generated dependency file.
The output directory is determined by the `file_type` and the corresponding
    key in the `file_config`. The path provided in `file_config` is taken
    relative to the directory containing `config_file_path`.
Parameters
----------
file_type : str
A string corresponding to the value of a member of constants.OutputTypes.
    config_file_path : str
        The path to the dependencies.yaml file; output paths are resolved
        relative to its parent directory.
file_config : dict
A dictionary corresponding to one of the [files.$FILENAME] sections of
the dependencies.yaml file. May contain `conda_dir` or
`requirements_dir`.
Returns
-------
str
The directory to write the file to.
"""
path = [os.path.dirname(config_file_path)]
if file_type == str(OutputTypes.CONDA):
path.append(file_config.get("conda_dir", default_conda_dir))
elif file_type == str(OutputTypes.REQUIREMENTS):
path.append(file_config.get("requirements_dir", default_requirements_dir))
elif file_type == str(OutputTypes.PYPROJECT):
path.append(file_config.get("pyproject_dir", default_pyproject_dir))
return os.path.join(*path)
def should_use_specific_entry(matrix_combo, specific_entry_matrix):
"""Check if an entry should be used.
Dependencies listed in the [dependencies.$DEPENDENCY_GROUP.specific]
section are specific to a particular matrix entry provided by the
[matrices] list. This function validates the [matrices.matrix] value
against the provided `matrix_combo` to check if they are compatible.
A `specific_entry_matrix` is compatible with a `matrix_combo` if and only if
`specific_entry_matrix[key] == matrix_combo[key]` for every key defined in
`specific_entry_matrix`. A `matrix_combo` may contain additional keys not
specified by `specific_entry_matrix`.
Parameters
----------
matrix_combo : dict
A mapping from matrix keys to values for the current file being
generated.
specific_entry_matrix : dict
A mapping from matrix keys to values for the current specific
dependency set being checked.
Returns
-------
bool
True if the `specific_entry_matrix` is compatible with the current
`matrix_combo` and False otherwise.
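
    Examples
    --------
    A specific entry matches when all of its keys agree with the current
    combination; extra keys in `matrix_combo` are ignored:

    >>> should_use_specific_entry({"cuda": "11.5", "arch": "x86_64"}, {"cuda": "11.5"})
    True
    >>> should_use_specific_entry({"cuda": "11.6", "arch": "x86_64"}, {"cuda": "11.5"})
    False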
"""
return all(
matrix_combo.get(specific_key) == specific_value
for specific_key, specific_value in specific_entry_matrix.items()
)
def make_dependency_files(parsed_config, config_file_path, to_stdout):
"""Generate dependency files.
This function iterates over data parsed from a YAML file conforming to the
    `dependencies.yaml file spec <https://github.com/rapidsai/dependency-file-generator#dependenciesyaml-format>`__
and produces the requested files.
Parameters
----------
parsed_config : dict
The parsed dependencies.yaml config file.
config_file_path : str
The path to the dependencies.yaml file.
to_stdout : bool
Whether the output should be written to stdout. If False, it will be
written to a file computed based on the output file type and
config_file_path.
Raises
    ------
ValueError
If the file is malformed. There are numerous different error cases
which are described by the error messages.
"""
channels = parsed_config.get("channels", default_channels) or default_channels
files = parsed_config["files"]
if to_stdout and any(
OutputTypes.PYPROJECT in get_requested_output_types(files[f]["output"])
for f in files
):
raise ValueError("to_stdout is not supported for pyproject.toml files.")
for file_key, file_config in files.items():
includes = file_config["includes"]
file_types_to_generate = get_requested_output_types(file_config["output"])
extras = file_config.get("extras", {})
for file_type in file_types_to_generate:
if file_type == "pyproject" and "matrix" in file_config:
raise ValueError(
"matrix entries are not supported for pyproject.toml files."
)
for matrix_combo in grid(file_config.get("matrix", {})):
dependencies = []
# Collect all includes from each dependency list corresponding
# to this (file_name, file_type, matrix_combo) tuple. The
# current tuple corresponds to a single file to be written.
for include in includes:
dependency_entry = parsed_config["dependencies"][include]
for common_entry in dependency_entry.get("common", []):
if file_type not in common_entry["output_types"]:
continue
dependencies.extend(common_entry["packages"])
for specific_entry in dependency_entry.get("specific", []):
if file_type not in specific_entry["output_types"]:
continue
found = False
fallback_entry = None
for specific_matrices_entry in specific_entry["matrices"]:
# An empty `specific_matrices_entry["matrix"]` is
# valid and can be used to specify a fallback_entry for a
# `matrix_combo` for which no specific entry
# exists. In that case we save the fallback_entry result
# and only use it at the end if nothing more
# specific is found.
if not specific_matrices_entry["matrix"]:
fallback_entry = specific_matrices_entry
continue
if should_use_specific_entry(
matrix_combo, specific_matrices_entry["matrix"]
):
# Raise an error if multiple specific entries
# (not including the fallback_entry) match a
# requested matrix combination.
if found:
raise ValueError(
f"Found multiple matches for matrix {matrix_combo}"
)
found = True
# A package list may be empty as a way to
# indicate that for some matrix elements no
# packages should be installed.
dependencies.extend(
specific_matrices_entry["packages"] or []
)
if not found:
if fallback_entry:
dependencies.extend(fallback_entry["packages"] or [])
else:
raise ValueError(
f"No matching matrix found in '{include}' for: {matrix_combo}"
)
# Dedupe deps and print / write to filesystem
full_file_name = get_filename(file_type, file_key, matrix_combo)
deduped_deps = dedupe(dependencies)
output_dir = (
"."
if to_stdout
else get_output_dir(file_type, config_file_path, file_config)
)
contents = make_dependency_file(
file_type,
full_file_name,
config_file_path,
output_dir,
channels,
deduped_deps,
extras,
)
if to_stdout:
print(contents)
else:
os.makedirs(output_dir, exist_ok=True)
file_path = os.path.join(output_dir, full_file_name)
with open(file_path, "w") as f:
if file_type == str(OutputTypes.PYPROJECT):
tomlkit.dump(contents, f)
else:
f.write(contents)
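

# Minimal usage sketch (hypothetical paths): parse a config with PyYAML and
# generate every declared file, as cli.main() does after first calling
# validate_dependencies() on the parsed config:
#
#   import yaml
#   with open("dependencies.yaml") as f:
#       parsed = yaml.load(f, Loader=yaml.FullLoader)
#   make_dependency_files(parsed, "dependencies.yaml", to_stdout=False)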
| 0 |
rapidsai_public_repos/dependency-file-generator/src | rapidsai_public_repos/dependency-file-generator/src/rapids_dependency_file_generator/_version.py | __version__ = "1.7.1"
| 0 |
rapidsai_public_repos/dependency-file-generator/src | rapidsai_public_repos/dependency-file-generator/src/rapids_dependency_file_generator/constants.py | from enum import Enum
class OutputTypes(Enum):
CONDA = "conda"
REQUIREMENTS = "requirements"
PYPROJECT = "pyproject"
NONE = "none"
def __str__(self):
return self.value
cli_name = "rapids-dependency-file-generator"
default_channels = [
"rapidsai",
"rapidsai-nightly",
"dask/label/dev",
"conda-forge",
"nvidia",
]
default_conda_dir = "conda/environments"
default_requirements_dir = "python"
default_pyproject_dir = "python"
default_dependency_file_path = "dependencies.yaml"
| 0 |
rapidsai_public_repos/dependency-file-generator/src | rapidsai_public_repos/dependency-file-generator/src/rapids_dependency_file_generator/rapids_dependency_file_validator.py | """Logic for validating dependency files."""
import importlib.resources
import json
import sys
import textwrap
import jsonschema
from jsonschema.exceptions import best_match
SCHEMA = json.loads(
importlib.resources.files(__package__).joinpath("schema.json").read_bytes()
)
def validate_dependencies(dependencies):
"""Valid a dictionary against the dependencies.yaml spec.
Parameters
----------
dependencies : dict
The parsed dependencies.yaml file.
Raises
------
    RuntimeError
        If the dependencies do not conform to the schema.
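
    Examples
    --------
    A minimal config that passes validation (illustrative only; real configs
    are read from dependencies.yaml):

    >>> validate_dependencies(
    ...     {
    ...         "files": {"all": {"output": "none", "includes": ["build"]}},
    ...         "dependencies": {
    ...             "build": {
    ...                 "common": [
    ...                     {"output_types": "conda", "packages": ["numpy"]}
    ...                 ]
    ...             }
    ...         },
    ...     }
    ... )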
"""
validator = jsonschema.Draft7Validator(SCHEMA)
errors = list(validator.iter_errors(dependencies))
if len(errors) > 0:
print("The provided dependency file contains schema errors.", file=sys.stderr)
best_matching_error = best_match(errors)
print(
"\n", textwrap.indent(str(best_matching_error), "\t"), "\n", file=sys.stderr
)
raise RuntimeError("The provided dependencies data is invalid.")
| 0 |
rapidsai_public_repos/dependency-file-generator/src | rapidsai_public_repos/dependency-file-generator/src/rapids_dependency_file_generator/__init__.py | from ._version import __version__
__all__ = [
"__version__",
]
| 0 |
rapidsai_public_repos | rapidsai_public_repos/nvgraph/Acknowledgements.md | # Acknowledgements
NVGRAPH is the product of a large community of developers and researchers since 2014, and we’re deeply
appreciative of their work. Here is a list of the people from NVIDIA who contributed to it up to the point of open sourcing:
## Managers
- Harun Bayraktar
- Joe Eaton
- Alex Fit-Florea
## nvGraph dev team
- Marat Arsaev
- Alex Fender
- Andrei Schaffer
## Contributors from other teams
- Hugo Braun
- Slawomir Kierat
- Ahmad Kiswani
- Szymon Migacz
- Maxim Naumov
- Nikolay Sakharnykh
- James Wyles
## Interns
- Danielle Maddix
- Tim Moon
And last but not least, thank you to the contributors from the CUDA PM and QA teams who have helped build nvGraph since its early days.
| 0 |
rapidsai_public_repos | rapidsai_public_repos/nvgraph/README.md | # nvGraph - NVIDIA graph library
Data analytics is a growing application of high-performance computing. Many advanced data analytics problems can be couched as graph problems. In turn, many of the common graph problems today can be couched as sparse linear algebra. This is the motivation for nvGraph, which harnesses the power of GPUs for linear algebra to handle large graph analytics.
This repository contains the legacy version of nvGraph as it was in the NVIDIA CUDA Toolkit. The aim is to provide a way for nvGraph users to continue using nvGraph after the CUDA Toolkit stops releasing it. While we still accept bug reports, we do not actively develop this product. If you find and can reproduce bugs in nvGRAPH, please [report issues on GitHub](https://github.com/rapidsai/nvgraph/issues/new).
Recently, NVIDIA started developing [cuGraph](https://github.com/rapidsai/cugraph) a collection of graph analytics that process data found in GPU Dataframes as part of [RAPIDS](https://rapids.ai/). Most nvGraph algorithms are now part of cuGraph too. In addition, cuGraph aims to provide a NetworkX-like API that will be familiar to data scientists, so they can now build GPU-accelerated workflows more easily. For more project details, see [rapids.ai](https://rapids.ai/).
## Get nvGraph
#### Prerequisites
Compiler requirements:
* `gcc` version 5.4+
* `nvcc` version 9.2
* `cmake` version 3.12
CUDA requirements:
* CUDA 9.2+
* NVIDIA driver 396.44+
* Pascal architecture or better
You can obtain CUDA from [https://developer.nvidia.com/cuda-downloads](https://developer.nvidia.com/cuda-downloads).
### Using the script
It is easy to install nvGraph from source. As a convenience, a `build.sh` script is provided. Run the script as shown below to download the source code, build and install the library. Note that the library will be installed to the location set in `$CUDA_ROOT` (eg. `export CUDA_ROOT=/usr/local/cuda`). These instructions were tested on Ubuntu 18.04.
```bash
git clone https://github.com/rapidsai/nvgraph.git
cd nvgraph
export CUDA_ROOT=/usr/local/cuda
./build.sh # build the nvGraph library and install it to $CUDA_ROOT (you may need to add the sudo prefix)
```
### Manually build from Source
The following instructions are for developers and contributors to nvGraph development. These instructions were tested on Linux Ubuntu 18.04. Use these instructions to build nvGraph from source and contribute to its development. Other operating systems may be compatible, but are not currently tested.
The nvGraph package is a C/C++ CUDA library that must be built and installed before it can be used.
The following instructions are tested on Linux systems.
#### Build and Install the C/C++ CUDA components
To install nvGraph from source, ensure the dependencies are met and follow the steps below:
1) Clone the repository and submodules
```bash
# Set the location of nvGraph in an environment variable NVGRAPH_HOME
export NVGRAPH_HOME=$(pwd)/nvgraph
# Download the nvGraph repo
git clone https://github.com/rapidsai/nvgraph.git $NVGRAPH_HOME
# Next load all the submodules
cd $NVGRAPH_HOME
git submodule update --init --recursive
```
2) Build and install `libnvgraph_rapids.so`. CMake depends on the `nvcc` executable being on your path or defined in `$CUDACXX`.
This project uses cmake for building the C/C++ library. To configure cmake, run:
```bash
cd $NVGRAPH_HOME
cd cpp # enter nvgraph's cpp directory
mkdir build # create build directory
cd build # enter the build directory
cmake .. -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
# now build the code
make -j # "-j" starts multiple threads
make install # install the libraries
```
The default installation locations are `$CMAKE_INSTALL_PREFIX/lib` and `$CMAKE_INSTALL_PREFIX/include/nvgraph` respectively.
#### C++ stand alone tests
```bash
# Run the tests
cd $NVGRAPH_HOME
cd cpp/build
gtests/NVGRAPH_TEST # this is an executable file
```
These tests verify that the library was properly built and that the graph structure works as expected.
We currently do not maintain the algorithm test suite. Most graph analytics features are now developed and tested in [cuGraph](https://github.com/rapidsai/cugraph).
## Documentation
The C API documentation can be found in the [CUDA Toolkit Documentation](https://docs.nvidia.com/cuda/archive/10.0/nvgraph/index.html).
| 0 |
rapidsai_public_repos | rapidsai_public_repos/nvgraph/build.sh | #!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION.
# nvgraph build script
# This script is used to build the component(s) in this repo from
# source, and can be called with various options to customize the
# build as needed (see the help output for details)
# Abort script on first error
set -e
NUMARGS=$#
ARGS=$*
# NOTE: ensure all dir changes are relative to the location of this
# script, and that this script resides in the repo dir!
REPODIR=$(cd $(dirname $0); pwd)
VALIDARGS="clean -v -g -n -h --help"
HELP="$0 [<target> ...] [<flag> ...]
where <target> is:
clean - remove all existing build artifacts and configuration (start over)
and <flag> is:
-v - verbose build mode
 -g - build for debug
 -n - no install step
-h - print this text
default action (no args) is to build and install 'libnvgraph' targets
"
LIBNVGRAPH_BUILD_DIR=${REPODIR}/cpp/build
BUILD_DIRS="${LIBNVGRAPH_BUILD_DIR}"
# Set defaults for vars modified by flags to this script
VERBOSE=""
BUILD_TYPE=Release
INSTALL_TARGET=install
INSTALL_PREFIX=${CUDA_ROOT}
PARALLEL_LEVEL=${PARALLEL_LEVEL:=""}
BUILD_ABI=${BUILD_ABI:=ON}
function hasArg {
(( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ")
}
if hasArg -h || hasArg --help; then
echo "${HELP}"
exit 0
fi
# Check for valid usage
if (( ${NUMARGS} != 0 )); then
for a in ${ARGS}; do
if ! (echo " ${VALIDARGS} " | grep -q " ${a} "); then
echo "Invalid option: ${a}"
exit 1
fi
done
fi
# Process flags
if hasArg -v; then
VERBOSE=1
fi
if hasArg -g; then
BUILD_TYPE=Debug
fi
if hasArg -n; then
INSTALL_TARGET=""
fi
# If clean given, run it prior to any other steps
if hasArg clean; then
# If the dirs to clean are mounted dirs in a container, the
# contents should be removed but the mounted dirs will remain.
# The find removes all contents but leaves the dirs, the rmdir
# attempts to remove the dirs but can fail safely.
for bd in ${BUILD_DIRS}; do
if [ -d ${bd} ]; then
find ${bd} -mindepth 1 -delete
rmdir ${bd} || true
fi
done
fi
################################################################################
# Configure, build, and install libnvgraph
git submodule update --init --recursive
mkdir -p ${LIBNVGRAPH_BUILD_DIR}
cd ${LIBNVGRAPH_BUILD_DIR}
cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_CXX11_ABI=${BUILD_ABI} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} ..
make -j${PARALLEL_LEVEL} VERBOSE=${VERBOSE} ${INSTALL_TARGET}
| 0 |
rapidsai_public_repos | rapidsai_public_repos/nvgraph/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/nvgraph | rapidsai_public_repos/nvgraph/external/cusparse_internal.h | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if !defined(CUSPARSE_INTERNAL_H_)
#define CUSPARSE_INTERNAL_H_
#ifndef CUSPARSEAPI
#ifdef _WIN32
#define CUSPARSEAPI __stdcall
#else
#define CUSPARSEAPI
#endif
#endif
#define CACHE_LINE_SIZE 128
#define ALIGN_32(x) ((((x)+31)/32)*32)
#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */
struct csrilu02BatchInfo;
typedef struct csrilu02BatchInfo *csrilu02BatchInfo_t;
struct csrxilu0Info;
typedef struct csrxilu0Info *csrxilu0Info_t;
struct csrxgemmSchurInfo;
typedef struct csrxgemmSchurInfo *csrxgemmSchurInfo_t;
struct csrxtrsmInfo;
typedef struct csrxtrsmInfo *csrxtrsmInfo_t;
struct csrilu03Info;
typedef struct csrilu03Info *csrilu03Info_t;
struct csrmmInfo;
typedef struct csrmmInfo *csrmmInfo_t;
cudaStream_t cusparseGetStreamInternal(const struct cusparseContext *ctx);
cusparseStatus_t CUSPARSEAPI cusparseCheckBuffer(
cusparseHandle_t handle,
void *workspace);
//------- gather: dst = src(map) ---------------------
cusparseStatus_t CUSPARSEAPI cusparseIgather(
cusparseHandle_t handle,
int n,
const int *src,
const int *map,
int *dst);
cusparseStatus_t CUSPARSEAPI cusparseSgather(
cusparseHandle_t handle,
int n,
const float *src,
const int *map,
float *dst);
cusparseStatus_t CUSPARSEAPI cusparseDgather(
cusparseHandle_t handle,
int n,
const double *src,
const int *map,
double *dst);
cusparseStatus_t CUSPARSEAPI cusparseCgather(
cusparseHandle_t handle,
int n,
const cuComplex *src,
const int *map,
cuComplex *dst);
cusparseStatus_t CUSPARSEAPI cusparseZgather(
cusparseHandle_t handle,
int n,
const cuDoubleComplex *src,
const int *map,
cuDoubleComplex *dst);
//------- scatter: dst(map) = src ---------------------
cusparseStatus_t CUSPARSEAPI cusparseIscatter(
cusparseHandle_t handle,
int n,
const int *src,
int *dst,
const int *map);
cusparseStatus_t CUSPARSEAPI cusparseSscatter(
cusparseHandle_t handle,
int n,
const float *src,
float *dst,
const int *map);
cusparseStatus_t CUSPARSEAPI cusparseDscatter(
cusparseHandle_t handle,
int n,
const double *src,
double *dst,
const int *map);
cusparseStatus_t CUSPARSEAPI cusparseCscatter(
cusparseHandle_t handle,
int n,
const cuComplex *src,
cuComplex *dst,
const int *map);
cusparseStatus_t CUSPARSEAPI cusparseZscatter(
cusparseHandle_t handle,
int n,
const cuDoubleComplex *src,
cuDoubleComplex *dst,
const int *map);
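/*
 * Usage sketch (illustrative only; d_src, d_dst and d_map are hypothetical
 * device arrays of n elements, and the semantics follow the comments above):
 *
 *   // gather, dst = src(map): d_dst[j] = d_src[d_map[j]]
 *   status = cusparseDgather(handle, n, d_src, d_map, d_dst);
 *   assert(CUSPARSE_STATUS_SUCCESS == status);
 *
 *   // scatter, dst(map) = src: d_dst[d_map[j]] = d_src[j]
 *   status = cusparseDscatter(handle, n, d_src, d_dst, d_map);
 *   assert(CUSPARSE_STATUS_SUCCESS == status);
 */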
// x[j] = j
cusparseStatus_t CUSPARSEAPI cusparseIidentity(
cusparseHandle_t handle,
int n,
int *x);
// x[j] = val
cusparseStatus_t CUSPARSEAPI cusparseImemset(
cusparseHandle_t handle,
int n,
int val,
int *x);
cusparseStatus_t CUSPARSEAPI cusparseI64memset(
cusparseHandle_t handle,
size_t n,
int val,
int *x);
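/*
 * Usage sketch (illustrative only; d_P and d_flags are hypothetical device
 * arrays of n ints — d_P is the typical initial permutation for the sort
 * routines below):
 *
 *   status = cusparseIidentity(handle, n, d_P);      // d_P[j] = j
 *   status = cusparseImemset(handle, n, 0, d_flags); // d_flags[j] = 0
 */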
// ----------- reduce -----------------
/*
* cusparseStatus_t
* cusparseIreduce_bufferSizeExt( cusparseHandle_t handle,
* int n,
* size_t *pBufferSizeInBytes)
* Input
* -----
* handle handle to CUSPARSE library context.
* n number of elements.
*
* Output
* ------
* pBufferSizeInBytes size of working space in bytes.
*
* Error Status
* ------------
* CUSPARSE_STATUS_SUCCESS the operation completed successfully.
* CUSPARSE_STATUS_NOT_INITIALIZED the library was not initialized.
* CUSPARSE_STATUS_INVALID_VALUE n is too big or negative
* CUSPARSE_STATUS_INTERNAL_ERROR an internal operation failed.
* If n is normal, we should not have this internal error.
*
* ---------
* Assumption:
* Only support n < 2^31.
*
*/
cusparseStatus_t CUSPARSEAPI cusparseIreduce_bufferSizeExt(
cusparseHandle_t handle,
int n,
size_t *pBufferSizeInBytes);
/*
* cusparseStatus_t
* cusparseIreduce(cusparseHandle_t handle,
* int n,
* int *src,
* void *pBuffer,
* int *total_sum)
*
* total_sum = reduction(src)
*
* Input
* -------
* handle handle to the CUSPARSE library context.
* n number of elements in src and dst.
* src <int> array of n elements.
* pBuffer working space, the size is reported by cusparseIreduce_bufferSizeExt.
* Or it can be a NULL pointer, then CUSPARSE library allocates working space implicitly.
*
* Output
* -------
* total_sum total_sum = reduction(src) if total_sum is not a NULL pointer.
*
*
* Error Status
* ------------
* CUSPARSE_STATUS_SUCCESS the operation completed successfully.
* CUSPARSE_STATUS_NOT_INITIALIZED the library was not initialized.
* CUSPARSE_STATUS_ALLOC_FAILED the resources could not be allocated.
* it is possible if pBuffer is NULL.
* CUSPARSE_STATUS_INTERNAL_ERROR an internal operation failed.
*
*
*/
cusparseStatus_t CUSPARSEAPI cusparseIreduce(
cusparseHandle_t handle,
int n,
int *src,
void *pBuffer,
int *total_sum);
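/*
 * Usage sketch following the bufferSizeExt pattern documented above
 * (illustrative only; d_src is a hypothetical device array of n ints, and
 * this sketch assumes total_sum may be a host pointer):
 *
 *   size_t bufferSize = 0;
 *   status = cusparseIreduce_bufferSizeExt(handle, n, &bufferSize);
 *   assert(CUSPARSE_STATUS_SUCCESS == status);
 *
 *   void *pBuffer;
 *   cudaStat = cudaMalloc(&pBuffer, bufferSize);
 *   assert(cudaSuccess == cudaStat);
 *
 *   int total = 0; // total = reduction(d_src)
 *   status = cusparseIreduce(handle, n, d_src, pBuffer, &total);
 *   assert(CUSPARSE_STATUS_SUCCESS == status);
 */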
// ----------- prefix sum -------------------
/*
* cusparseStatus_t
* cusparseIinclusiveScan_bufferSizeExt( cusparseHandle_t handle,
* int n,
* size_t *pBufferSizeInBytes)
* Input
* -----
* handle handle to CUSPARSE library context.
* n number of elements.
*
* Output
* ------
* pBufferSizeInBytes size of working space in bytes.
*
* Error Status
* ------------
* CUSPARSE_STATUS_SUCCESS the operation completed successfully.
* CUSPARSE_STATUS_NOT_INITIALIZED the library was not initialized.
* CUSPARSE_STATUS_INVALID_VALUE n is too big or negative
* CUSPARSE_STATUS_INTERNAL_ERROR an internal operation failed.
* If n is normal, we should not have this internal error.
*
* ---------
* Assumption:
* Only support n < 2^31.
*
*/
cusparseStatus_t CUSPARSEAPI cusparseIinclusiveScan_bufferSizeExt(
cusparseHandle_t handle,
int n,
size_t *pBufferSizeInBytes);
/*
* cusparseStatus_t
* cusparseIinclusiveScan(cusparseHandle_t handle,
* int base,
* int n,
* int *src,
* void *pBuffer,
* int *dst,
* int *total_sum)
*
* dst = inclusiveScan(src) + base
* total_sum = reduction(src)
*
* Input
* -------
* handle handle to the CUSPARSE library context.
* n number of elements in src and dst.
* src <int> array of n elements.
* pBuffer working space, the size is reported by cusparseIinclusiveScan_bufferSizeExt.
* Or it can be a NULL pointer, then CUSPARSE library allocates working space implicitly.
*
* Output
* -------
* dst <int> array of n elements.
* dst = inclusiveScan(src) + base
* total_sum total_sum = reduction(src) if total_sum is not a NULL pointer.
*
* Error Status
* ------------
* CUSPARSE_STATUS_SUCCESS the operation completed successfully.
* CUSPARSE_STATUS_NOT_INITIALIZED the library was not initialized.
* CUSPARSE_STATUS_ALLOC_FAILED the resources could not be allocated.
* it is possible if pBuffer is NULL.
* CUSPARSE_STATUS_INTERNAL_ERROR an internal operation failed.
*
*/
cusparseStatus_t CUSPARSEAPI cusparseIinclusiveScan(
cusparseHandle_t handle,
int base,
int n,
int *src,
void *pBuffer,
int *dst,
int *total_sum);
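/*
 * Usage sketch (illustrative only): one common use of an inclusive scan is
 * turning per-row nonzero counts into a CSR row-pointer array. The names
 * d_rowCount (n ints) and d_csrRowPtr (n+1 ints) are hypothetical, and this
 * sketch assumes total_sum may be a host pointer.
 *
 *   size_t bufferSize = 0;
 *   status = cusparseIinclusiveScan_bufferSizeExt(handle, n, &bufferSize);
 *   cudaStat = cudaMalloc(&pBuffer, bufferSize);
 *
 *   int nnz = 0;
 *   // d_csrRowPtr[1+j] = inclusiveScan(d_rowCount)[j] + base, with base = 0
 *   status = cusparseIinclusiveScan(handle, 0, n, d_rowCount, pBuffer,
 *                                   d_csrRowPtr + 1, &nnz);
 *   // d_csrRowPtr[0] must still be set to 0 by the caller.
 */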
// ----------- stable sort -----------------
/*
* cusparseStatus_t
* cusparseIstableSortByKey_bufferSizeExt( cusparseHandle_t handle,
* int n,
* size_t *pBufferSizeInBytes)
* Input
* -----
* handle handle to CUSPARSE library context.
* n number of elements.
*
* Output
* ------
* pBufferSizeInBytes size of working space in bytes.
*
* Error Status
* ------------
* CUSPARSE_STATUS_SUCCESS the operation completed successfully.
* CUSPARSE_STATUS_NOT_INITIALIZED the library was not initialized.
* CUSPARSE_STATUS_INVALID_VALUE n is too big or negative
* CUSPARSE_STATUS_INTERNAL_ERROR an internal operation failed.
* If n is normal, we should not have this internal error.
*
* ---------
* Assumption:
* Only support n < 2^30 because of domino scheme.
*
*/
cusparseStatus_t CUSPARSEAPI cusparseIstableSortByKey_bufferSizeExt(
cusparseHandle_t handle,
int n,
size_t *pBufferSizeInBytes);
/*
* cusparseStatus_t
* cusparseIstableSortByKey( cusparseHandle_t handle,
* int n,
* int *key,
* int *P,
* void *pBuffer)
*
* in-place radix sort.
* This is an in-house implementation of thrust::stable_sort_by_key(key, P).
*
* Input
* -----
* handle handle to CUSPARSE library context.
* n number of elements.
* key <int> array of n elements.
* P <int> array of n elements.
* pBuffer working space, the size is reported by cusparseIstableSortByKey_bufferSizeExt.
* Or it can be a NULL pointer, then CUSPARSE library allocates working space implicitly.
*
* Output
* ------
* key <int> array of n elements.
* P <int> array of n elements.
*
* Error Status
* ------------
* CUSPARSE_STATUS_SUCCESS the operation completed successfully.
* CUSPARSE_STATUS_NOT_INITIALIZED the library was not initialized.
* CUSPARSE_STATUS_ALLOC_FAILED the resources could not be allocated.
* CUSPARSE_STATUS_INTERNAL_ERROR an internal operation failed.
*
* -----
* Assumption:
* Only support n < 2^30 because of domino scheme.
*
* -----
* Usage:
* size_t nBufferSize = 0;
* status = cusparseIstableSortByKey_bufferSizeExt(handle, n, &nBufferSize);
* assert(CUSPARSE_STATUS_SUCCESS == status);
*
* int *pBuffer;
* cudaStat = cudaMalloc((void**)&pBuffer, nBufferSize);
* assert(cudaSuccess == cudaStat);
*
* status = cusparseIidentity(handle, n, d_P); // d_P = 0:n-1
* status = cusparseIstableSortByKey(handle, n, d_csrRowPtrA, d_P, pBuffer);
* assert(CUSPARSE_STATUS_SUCCESS == status);
*
*/
cusparseStatus_t CUSPARSEAPI cusparseIstableSortByKey(
cusparseHandle_t handle,
int n,
int *key,
int *P,
void *pBuffer);
// ------------------- csr42csr ------------------
cusparseStatus_t CUSPARSEAPI cusparseXcsr42csr_bufferSize(
cusparseHandle_t handle,
int m,
int n,
const cusparseMatDescr_t descrA,
int nnzA,
const int *csrRowPtrA,
const int *csrEndPtrA,
size_t *pBufferSizeInByte );
cusparseStatus_t CUSPARSEAPI cusparseXcsr42csrRows(
cusparseHandle_t handle,
int m,
int n,
const cusparseMatDescr_t descrA,
int nnzA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrC,
int *csrRowPtrC,
int *nnzTotalDevHostPtr,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseXcsr42csrCols(
cusparseHandle_t handle,
int m,
int n,
const cusparseMatDescr_t descrA,
int nnzA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrC,
const int *csrRowPtrC,
int *csrColIndC,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseScsr42csrVals(
cusparseHandle_t handle,
int m,
int n,
const float *alpha,
const cusparseMatDescr_t descrA,
int nnzA,
const float *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrC,
float *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseDcsr42csrVals(
cusparseHandle_t handle,
int m,
int n,
const double *alpha,
const cusparseMatDescr_t descrA,
int nnzA,
const double *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrC,
double *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseCcsr42csrVals(
cusparseHandle_t handle,
int m,
int n,
const cuComplex *alpha,
const cusparseMatDescr_t descrA,
int nnzA,
const cuComplex *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrC,
cuComplex *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseZcsr42csrVals(
cusparseHandle_t handle,
int m,
int n,
const cuDoubleComplex *alpha,
const cusparseMatDescr_t descrA,
int nnzA,
const cuDoubleComplex *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrC,
cuDoubleComplex *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
void *pBuffer );
// ----- csrmv_hyb ------------------------------
cusparseStatus_t CUSPARSEAPI cusparseScsrmv_hyb(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const float *alpha,
const cusparseMatDescr_t descra,
const float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
const float *x,
const float *beta,
float *y);
cusparseStatus_t CUSPARSEAPI cusparseDcsrmv_hyb(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const double *alpha,
const cusparseMatDescr_t descra,
const double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
const double *x,
const double *beta,
double *y);
cusparseStatus_t CUSPARSEAPI cusparseCcsrmv_hyb(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const cuComplex *alpha,
const cusparseMatDescr_t descra,
const cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
const cuComplex *x,
const cuComplex *beta,
cuComplex *y);
cusparseStatus_t CUSPARSEAPI cusparseZcsrmv_hyb(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const cuDoubleComplex *alpha,
const cusparseMatDescr_t descra,
const cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
const cuDoubleComplex *x,
const cuDoubleComplex *beta,
cuDoubleComplex *y);
// ------------- getrf_ilu ---------------------
cusparseStatus_t CUSPARSEAPI cusparseSgetrf_ilu(
cusparseHandle_t handle,
const int submatrix_k,
const int n,
float *A,
const int *pattern,
const int lda,
int *d_status,
int enable_boost,
double *tol_ptr,
float *boost_ptr);
cusparseStatus_t CUSPARSEAPI cusparseDgetrf_ilu(
cusparseHandle_t handle,
const int submatrix_k,
const int n,
double *A,
const int *pattern,
const int lda,
int *d_status,
int enable_boost,
double *tol_ptr,
double *boost_ptr);
cusparseStatus_t CUSPARSEAPI cusparseCgetrf_ilu(
cusparseHandle_t handle,
const int submatrix_k,
const int n,
cuComplex *A,
const int *pattern,
const int lda,
int *d_status,
int enable_boost,
double *tol_ptr,
cuComplex *boost_ptr);
cusparseStatus_t CUSPARSEAPI cusparseZgetrf_ilu(
cusparseHandle_t handle,
const int submatrix_k,
const int n,
cuDoubleComplex *A,
const int *pattern,
const int lda,
int *d_status,
int enable_boost,
double *tol_ptr,
cuDoubleComplex *boost_ptr);
// ------------- potrf_ic ---------------------
cusparseStatus_t CUSPARSEAPI cusparseSpotrf_ic(
cusparseHandle_t handle,
const int submatrix_k,
const int n,
float *A,
const int *pattern,
const int lda,
int *d_status);
cusparseStatus_t CUSPARSEAPI cusparseDpotrf_ic(
cusparseHandle_t handle,
const int submatrix_k,
const int n,
double *A,
const int *pattern,
const int lda,
int *d_status);
cusparseStatus_t CUSPARSEAPI cusparseCpotrf_ic(
cusparseHandle_t handle,
const int submatrix_k,
const int n,
cuComplex *A,
const int *pattern,
const int lda,
int *d_status);
cusparseStatus_t CUSPARSEAPI cusparseZpotrf_ic(
cusparseHandle_t handle,
const int submatrix_k,
const int n,
cuDoubleComplex *A,
const int *pattern,
const int lda,
int *d_status);
cusparseStatus_t CUSPARSEAPI cusparseXcsric02_denseConfig(
csric02Info_t info,
int enable_dense_block,
int max_dim_dense_block,
int threshold_dense_block,
double ratio);
cusparseStatus_t CUSPARSEAPI cusparseXcsric02_workspaceConfig(
csric02Info_t info,
int disable_workspace_limit);
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu02_denseConfig(
csrilu02Info_t info,
int enable_dense_block,
int max_dim_dense_block,
int threshold_dense_block,
double ratio);
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu02_workspaceConfig(
csrilu02Info_t info,
int disable_workspace_limit);
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu02Batch_denseConfig(
csrilu02BatchInfo_t info,
int enable_dense_block,
int max_dim_dense_block,
int threshold_dense_block,
double ratio);
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu02Batch_workspaceConfig(
csrilu02BatchInfo_t info,
int disable_workspace_limit);
// ---------------- csric02 internal ----------------
cusparseStatus_t CUSPARSEAPI cusparseXcsric02_getLevel(
csric02Info_t info,
int **level_ref);
cusparseStatus_t CUSPARSEAPI cusparseScsric02_internal(
cusparseHandle_t handle,
int enable_potrf,
int dense_block_start,
//int dense_block_dim, // = m - dense_block_start
int dense_block_lda,
int *level, // level is a permutation vector of 0:(m-1)
int m,
int nnz,
const cusparseMatDescr_t descrA,
float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csric02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsric02_internal(
cusparseHandle_t handle,
int enable_potrf,
int dense_block_start,
//int dense_block_dim, // = m - dense_block_start
int dense_block_lda,
int *level, // level is a permutation vector of 0:(m-1)
int m,
int nnz,
const cusparseMatDescr_t descrA,
double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csric02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsric02_internal(
cusparseHandle_t handle,
int enable_potrf,
int dense_block_start,
//int dense_block_dim, // = m - dense_block_start
int dense_block_lda,
int *level, // level is a permutation vector of 0:(m-1)
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csric02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsric02_internal(
cusparseHandle_t handle,
int enable_potrf,
int dense_block_start,
//int dense_block_dim, // = m - dense_block_start
int dense_block_lda,
int *level, // level is a permutation vector of 0:(m-1)
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csric02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
// csrilu02 internal
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu02_getLevel(
csrilu02Info_t info,
int **level_ref);
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu02_getCsrEndPtrL(
csrilu02Info_t info,
int **csrEndPtrL_ref);
// ----------------- batch ilu0 -----------------
cusparseStatus_t CUSPARSEAPI cusparseCreateCsrilu02BatchInfo(
csrilu02BatchInfo_t *info);
cusparseStatus_t CUSPARSEAPI cusparseDestroyCsrilu02BatchInfo(
csrilu02BatchInfo_t info);
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu02Batch_zeroPivot(
cusparseHandle_t handle,
csrilu02BatchInfo_t info,
int *position);
cusparseStatus_t CUSPARSEAPI cusparseScsrilu02Batch_numericBoost(
cusparseHandle_t handle,
csrilu02BatchInfo_t info,
int enable_boost,
double *tol,
float *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseDcsrilu02Batch_numericBoost(
cusparseHandle_t handle,
csrilu02BatchInfo_t info,
int enable_boost,
double *tol,
double *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseCcsrilu02Batch_numericBoost(
cusparseHandle_t handle,
csrilu02BatchInfo_t info,
int enable_boost,
double *tol,
cuComplex *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseZcsrilu02Batch_numericBoost(
cusparseHandle_t handle,
csrilu02BatchInfo_t info,
int enable_boost,
double *tol,
cuDoubleComplex *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseScsrilu02Batch_bufferSizeExt(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseDcsrilu02Batch_bufferSizeExt(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseCcsrilu02Batch_bufferSizeExt(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseZcsrilu02Batch_bufferSizeExt(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseScsrilu02Batch_analysis(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrilu02Batch_analysis(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrilu02Batch_analysis(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrilu02Batch_analysis(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseScsrilu02Batch(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descra,
float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrilu02Batch(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descra,
double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrilu02Batch(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descra,
cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrilu02Batch(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descra,
cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrilu02BatchInfo_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
// --------------- csrsv2 batch --------------
cusparseStatus_t CUSPARSEAPI cusparseScsrsv2Batch_bufferSizeExt(
cusparseHandle_t handle,
cusparseOperation_t transA,
int m,
int nnz,
const cusparseMatDescr_t descrA,
float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrsv2Info_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseDcsrsv2Batch_bufferSizeExt(
cusparseHandle_t handle,
cusparseOperation_t transA,
int m,
int nnz,
const cusparseMatDescr_t descrA,
double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrsv2Info_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseCcsrsv2Batch_bufferSizeExt(
cusparseHandle_t handle,
cusparseOperation_t transA,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrsv2Info_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseZcsrsv2Batch_bufferSizeExt(
cusparseHandle_t handle,
cusparseOperation_t transA,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrsv2Info_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseScsrsv2Batch_analysis(
cusparseHandle_t handle,
cusparseOperation_t transA,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrsv2Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrsv2Batch_analysis(
cusparseHandle_t handle,
cusparseOperation_t transA,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrsv2Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrsv2Batch_analysis(
cusparseHandle_t handle,
cusparseOperation_t transA,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrsv2Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrsv2Batch_analysis(
cusparseHandle_t handle,
cusparseOperation_t transA,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
int batchSize,
csrsv2Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseXcsrsv2Batch_zeroPivot(
cusparseHandle_t handle,
csrsv2Info_t info,
int *position);
cusparseStatus_t CUSPARSEAPI cusparseScsrsv2Batch_solve(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int nnz,
const cusparseMatDescr_t descra,
const float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csrsv2Info_t info,
const float *x,
float *y,
int batchSize,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrsv2Batch_solve(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int nnz,
const cusparseMatDescr_t descra,
const double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csrsv2Info_t info,
const double *x,
double *y,
int batchSize,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrsv2Batch_solve(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int nnz,
const cusparseMatDescr_t descra,
const cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csrsv2Info_t info,
const cuComplex *x,
cuComplex *y,
int batchSize,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrsv2Batch_solve(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int nnz,
const cusparseMatDescr_t descra,
const cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csrsv2Info_t info,
const cuDoubleComplex *x,
cuDoubleComplex *y,
int batchSize,
cusparseSolvePolicy_t policy,
void *pBuffer);
//-------------- csrgemm2 -------------
cusparseStatus_t CUSPARSEAPI cusparseXcsrgemm2_spaceConfig(
csrgemm2Info_t info,
int disable_space_limit);
// internal-use only
cusparseStatus_t CUSPARSEAPI cusparseXcsrgemm2Rows_bufferSize(
cusparseHandle_t handle,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
int nnzA,
const int *csrRowPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrB,
int nnzB,
const int *csrRowPtrB,
const int *csrColIndB,
csrgemm2Info_t info,
size_t *pBufferSize );
// internal-use only
cusparseStatus_t CUSPARSEAPI cusparseXcsrgemm2Cols_bufferSize(
cusparseHandle_t handle,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
int nnzA,
const int *csrRowPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrB,
int nnzB,
const int *csrRowPtrB,
const int *csrColIndB,
csrgemm2Info_t info,
size_t *pBufferSize );
cusparseStatus_t CUSPARSEAPI cusparseXcsrgemm2Rows(
cusparseHandle_t handle,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
int nnzA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrB,
int nnzB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
const cusparseMatDescr_t descrD,
int nnzD,
const int *csrRowPtrD,
const int *csrEndPtrD,
const int *csrColIndD,
const cusparseMatDescr_t descrC,
int *csrRowPtrC,
int *nnzTotalDevHostPtr,
csrgemm2Info_t info,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseXcsrgemm2Cols(
cusparseHandle_t handle,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
int nnzA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrB,
int nnzB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
const cusparseMatDescr_t descrD,
int nnzD,
const int *csrRowPtrD,
const int *csrEndPtrD,
const int *csrColIndD,
const cusparseMatDescr_t descrC,
const int *csrRowPtrC,
int *csrColIndC,
csrgemm2Info_t info,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseScsrgemm2Vals(
cusparseHandle_t handle,
int m,
int n,
int k,
const float *alpha,
const cusparseMatDescr_t descrA,
int nnzA,
const float *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrB,
int nnzB,
const float *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
const cusparseMatDescr_t descrD,
int nnzD,
const float *csrValD,
const int *csrRowPtrD,
const int *csrEndPtrD,
const int *csrColIndD,
const float *beta,
const cusparseMatDescr_t descrC,
float *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrgemm2Info_t info,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseDcsrgemm2Vals(
cusparseHandle_t handle,
int m,
int n,
int k,
const double *alpha,
const cusparseMatDescr_t descrA,
int nnzA,
const double *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrB,
int nnzB,
const double *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
const cusparseMatDescr_t descrD,
int nnzD,
const double *csrValD,
const int *csrRowPtrD,
const int *csrEndPtrD,
const int *csrColIndD,
const double *beta,
const cusparseMatDescr_t descrC,
double *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrgemm2Info_t info,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseCcsrgemm2Vals(
cusparseHandle_t handle,
int m,
int n,
int k,
const cuComplex *alpha,
const cusparseMatDescr_t descrA,
int nnzA,
const cuComplex *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrB,
int nnzB,
const cuComplex *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
const cusparseMatDescr_t descrD,
int nnzD,
const cuComplex *csrValD,
const int *csrRowPtrD,
const int *csrEndPtrD,
const int *csrColIndD,
const cuComplex *beta,
const cusparseMatDescr_t descrC,
cuComplex *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrgemm2Info_t info,
void *pBuffer );
cusparseStatus_t CUSPARSEAPI cusparseZcsrgemm2Vals(
cusparseHandle_t handle,
int m,
int n,
int k,
const cuDoubleComplex *alpha,
const cusparseMatDescr_t descrA,
int nnzA,
const cuDoubleComplex *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
const cusparseMatDescr_t descrB,
int nnzB,
const cuDoubleComplex *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
const cusparseMatDescr_t descrD,
int nnzD,
const cuDoubleComplex *csrValD,
const int *csrRowPtrD,
const int *csrEndPtrD,
const int *csrColIndD,
const cuDoubleComplex *beta,
const cusparseMatDescr_t descrC,
cuDoubleComplex *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrgemm2Info_t info,
void *pBuffer );
// ---------------- csr2csc2
cusparseStatus_t CUSPARSEAPI cusparseXcsr2csc2_bufferSizeExt(
cusparseHandle_t handle,
int m,
int n,
int nnz,
const int *csrRowPtr,
const int *csrColInd,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseXcsr2csc2(
cusparseHandle_t handle,
int m,
int n,
int nnz,
const cusparseMatDescr_t descrA,
const int *csrRowPtr,
const int *csrColInd,
int *cscColPtr,
int *cscRowInd,
int *cscValInd,
void *pBuffer);
#if 0
// ------------- CSC ILU0
cusparseStatus_t CUSPARSEAPI cusparseXcscilu02_getLevel(
cscilu02Info_t info,
int **level_ref);
cusparseStatus_t CUSPARSEAPI cusparseXcscilu02_getCscColPtrL(
cscilu02Info_t info,
int **cscColPtrL_ref);
cusparseStatus_t CUSPARSEAPI cusparseCreateCscilu02Info(
cscilu02Info_t *info);
cusparseStatus_t CUSPARSEAPI cusparseDestroyCscilu02Info(
cscilu02Info_t info);
cusparseStatus_t CUSPARSEAPI cusparseXcscilu02_zeroPivot(
cusparseHandle_t handle,
cscilu02Info_t info,
int *position);
cusparseStatus_t CUSPARSEAPI cusparseScscilu02_numericBoost(
cusparseHandle_t handle,
cscilu02Info_t info,
int enable_boost,
double *tol,
float *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseDcscilu02_numericBoost(
cusparseHandle_t handle,
cscilu02Info_t info,
int enable_boost,
double *tol,
double *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseCcscilu02_numericBoost(
cusparseHandle_t handle,
cscilu02Info_t info,
int enable_boost,
double *tol,
cuComplex *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseZcscilu02_numericBoost(
cusparseHandle_t handle,
cscilu02Info_t info,
int enable_boost,
double *tol,
cuDoubleComplex *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseScscilu02_bufferSize(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
float *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
int *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseDcscilu02_bufferSize(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
double *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
int *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseCcscilu02_bufferSize(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuComplex *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
int *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseZcscilu02_bufferSize(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuDoubleComplex *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
int *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseScscilu02_analysis(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const float *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcscilu02_analysis(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const double *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcscilu02_analysis(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const cuComplex *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcscilu02_analysis(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const cuDoubleComplex *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseScscilu02(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
float *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcscilu02(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
double *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcscilu02(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuComplex *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcscilu02(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuDoubleComplex *cscVal,
const int *cscColPtr,
const int *cscEndPtr,
const int *cscRowInd,
cscilu02Info_t info,
cusparseSolvePolicy_t policy,
void *pBuffer);
#endif
// ------------- csrxjusqua
cusparseStatus_t CUSPARSEAPI cusparseXcsrxjusqua(
cusparseHandle_t handle,
int iax,
int iay,
int m,
int n,
const cusparseMatDescr_t descrA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int *csrjusqua );
// ------------ csrxilu0
cusparseStatus_t CUSPARSEAPI cusparseCreateCsrxilu0Info(
csrxilu0Info_t *info);
cusparseStatus_t CUSPARSEAPI cusparseDestroyCsrxilu0Info(
csrxilu0Info_t info);
cusparseStatus_t CUSPARSEAPI cusparseXcsrxilu0_zeroPivot(
cusparseHandle_t handle,
csrxilu0Info_t info,
int *position);
cusparseStatus_t CUSPARSEAPI cusparseScsrxilu0_numericBoost(
cusparseHandle_t handle,
csrxilu0Info_t info,
int enable_boost,
double *tol,
float *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseDcsrxilu0_numericBoost(
cusparseHandle_t handle,
csrxilu0Info_t info,
int enable_boost,
double *tol,
double *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseCcsrxilu0_numericBoost(
cusparseHandle_t handle,
csrxilu0Info_t info,
int enable_boost,
double *tol,
cuComplex *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseZcsrxilu0_numericBoost(
cusparseHandle_t handle,
csrxilu0Info_t info,
int enable_boost,
double *tol,
cuDoubleComplex *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseXcsrxilu0_bufferSizeExt(
cusparseHandle_t handle,
int iax,
int iay,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
const int *csrRowPtr,
const int *csrEndPtr,
const int *csrColInd,
csrxilu0Info_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseScsrxilu0(
cusparseHandle_t handle,
int iax,
int iay,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
float *csrVal,
const int *csrRowPtr,
const int *csrEndPtr,
const int *csrColInd,
csrxilu0Info_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrxilu0(
cusparseHandle_t handle,
int iax,
int iay,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
double *csrVal,
const int *csrRowPtr,
const int *csrEndPtr,
const int *csrColInd,
csrxilu0Info_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrxilu0(
cusparseHandle_t handle,
int iax,
int iay,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
cuComplex *csrVal,
const int *csrRowPtr,
const int *csrEndPtr,
const int *csrColInd,
csrxilu0Info_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrxilu0(
cusparseHandle_t handle,
int iax,
int iay,
int m,
int n,
int k,
const cusparseMatDescr_t descrA,
cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrEndPtr,
const int *csrColInd,
csrxilu0Info_t info,
void *pBuffer);
// ----------- csrxgemmSchur
cusparseStatus_t CUSPARSEAPI cusparseCreateCsrxgemmSchurInfo(
csrxgemmSchurInfo_t *info);
cusparseStatus_t CUSPARSEAPI cusparseDestroyCsrxgemmSchurInfo(
csrxgemmSchurInfo_t info);
cusparseStatus_t CUSPARSEAPI cusparseXcsrxgemmSchur_bufferSizeExt(
cusparseHandle_t handle,
int m,
int n,
int k,
int iax,
int iay,
const cusparseMatDescr_t descrA,
int nnzA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
int nnzB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
int icx,
int icy,
const cusparseMatDescr_t descrC,
int nnzC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrxgemmSchurInfo_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseScsrxgemmSchur(
cusparseHandle_t handle,
int m,
int n,
int k,
int iax,
int iay,
const cusparseMatDescr_t descrA,
int nnzA,
const float *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
int nnzB,
const float *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
int icx,
int icy,
const cusparseMatDescr_t descrC,
int nnzC,
float *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrxgemmSchurInfo_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrxgemmSchur(
cusparseHandle_t handle,
int m,
int n,
int k,
int iax,
int iay,
const cusparseMatDescr_t descrA,
int nnzA,
const double *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
int nnzB,
const double *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
int icx,
int icy,
const cusparseMatDescr_t descrC,
int nnzC,
double *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrxgemmSchurInfo_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrxgemmSchur(
cusparseHandle_t handle,
int m,
int n,
int k,
int iax,
int iay,
const cusparseMatDescr_t descrA,
int nnzA,
const cuComplex *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
int nnzB,
const cuComplex *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
int icx,
int icy,
const cusparseMatDescr_t descrC,
int nnzC,
cuComplex *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrxgemmSchurInfo_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrxgemmSchur(
cusparseHandle_t handle,
int m,
int n,
int k,
int iax,
int iay,
const cusparseMatDescr_t descrA,
int nnzA,
const cuDoubleComplex *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
int nnzB,
const cuDoubleComplex *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
int icx,
int icy,
const cusparseMatDescr_t descrC,
int nnzC,
cuDoubleComplex *csrValC,
const int *csrRowPtrC,
const int *csrEndPtrC,
const int *csrColIndC,
csrxgemmSchurInfo_t info,
void *pBuffer);
// ---------- csrxtrsm
#if 0
cusparseStatus_t CUSPARSEAPI cusparseCreateCsrxtrsmInfo(
csrxtrsmInfo_t *info);
cusparseStatus_t CUSPARSEAPI cusparseDestroyCsrxtrsmInfo(
csrxtrsmInfo_t info);
cusparseStatus_t CUSPARSEAPI cusparseXcsrxtrsm_bufferSizeExt(
cusparseHandle_t handle,
int m,
int n,
cusparseSideMode_t side,
int iax,
int iay,
const cusparseMatDescr_t descrA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
csrxtrsmInfo_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseScsrxtrsm(
cusparseHandle_t handle,
int m,
int n,
cusparseSideMode_t side,
int iax,
int iay,
const cusparseMatDescr_t descrA,
const float *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
float *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
csrxtrsmInfo_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrxtrsm(
cusparseHandle_t handle,
int m,
int n,
cusparseSideMode_t side,
int iax,
int iay,
const cusparseMatDescr_t descrA,
const double *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
double *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
csrxtrsmInfo_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrxtrsm(
cusparseHandle_t handle,
int m,
int n,
cusparseSideMode_t side,
int iax,
int iay,
const cusparseMatDescr_t descrA,
const cuComplex *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
cuComplex *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
csrxtrsmInfo_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrxtrsm(
cusparseHandle_t handle,
int m,
int n,
cusparseSideMode_t side,
int iax,
int iay,
const cusparseMatDescr_t descrA,
const cuDoubleComplex *csrValA,
const int *csrRowPtrA,
const int *csrEndPtrA,
const int *csrColIndA,
int ibx,
int iby,
const cusparseMatDescr_t descrB,
cuDoubleComplex *csrValB,
const int *csrRowPtrB,
const int *csrEndPtrB,
const int *csrColIndB,
csrxtrsmInfo_t info,
void *pBuffer);
#endif
// ------ CSR ilu03
cusparseStatus_t CUSPARSEAPI cusparseCreateCsrilu03Info(
csrilu03Info_t *info);
cusparseStatus_t CUSPARSEAPI cusparseDestroyCsrilu03Info(
csrilu03Info_t info);
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu03_bufferSizeExt(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
const int *csrRowPtr,
const int *csrColInd,
csrilu03Info_t info,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseXcsrilu03_zeroPivot(
cusparseHandle_t handle,
csrilu03Info_t info,
int *position);
cusparseStatus_t CUSPARSEAPI cusparseScsrilu03_numericBoost(
cusparseHandle_t handle,
csrilu03Info_t info,
int enable_boost,
double *tol,
float *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseDcsrilu03_numericBoost(
cusparseHandle_t handle,
csrilu03Info_t info,
int enable_boost,
double *tol,
double *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseCcsrilu03_numericBoost(
cusparseHandle_t handle,
csrilu03Info_t info,
int enable_boost,
double *tol,
cuComplex *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseZcsrilu03_numericBoost(
cusparseHandle_t handle,
csrilu03Info_t info,
int enable_boost,
double *tol,
cuDoubleComplex *numeric_boost);
cusparseStatus_t CUSPARSEAPI cusparseScsrilu03(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
float *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csrilu03Info_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrilu03(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csrilu03Info_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrilu03(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csrilu03Info_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrilu03(
cusparseHandle_t handle,
int m,
int nnz,
const cusparseMatDescr_t descrA,
cuDoubleComplex *csrVal,
const int *csrRowPtr,
const int *csrColInd,
csrilu03Info_t info,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseXcsrValid(
cusparseHandle_t handle,
int m,
int n,
int nnzA,
const cusparseMatDescr_t descrA,
const int *csrRowPtrA,
const int *csrColIndA,
int *valid);
cusparseStatus_t CUSPARSEAPI cusparseScsrmm3(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnz,
const float *alpha,
const cusparseMatDescr_t descrA,
const float *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const float *B,
int ldb,
const float *beta,
float *C,
int ldc,
void *buffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrmm3(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnz,
const double *alpha,
const cusparseMatDescr_t descrA,
const double *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const double *B,
int ldb,
const double *beta,
double *C,
int ldc,
void *buffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrmm3(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnz,
const cuComplex *alpha,
const cusparseMatDescr_t descrA,
const cuComplex *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const cuComplex *B,
int ldb,
const cuComplex *beta,
cuComplex *C,
int ldc,
void *buffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrmm3(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnz,
const cuDoubleComplex *alpha,
const cusparseMatDescr_t descrA,
const cuDoubleComplex *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const cuDoubleComplex *B,
int ldb,
const cuDoubleComplex *beta,
cuDoubleComplex *C,
int ldc,
void *buffer);
cusparseStatus_t CUSPARSEAPI cusparseStranspose(
cusparseHandle_t handle,
cusparseOperation_t transa,
int m,
int n,
const float *alpha,
const float *A,
int lda,
float *C,
int ldc);
cusparseStatus_t CUSPARSEAPI cusparseDtranspose(
cusparseHandle_t handle,
cusparseOperation_t transa,
int m,
int n,
const double *alpha,
const double *A,
int lda,
double *C,
int ldc);
cusparseStatus_t CUSPARSEAPI cusparseCtranspose(
cusparseHandle_t handle,
cusparseOperation_t transa,
int m,
int n,
const cuComplex *alpha,
const cuComplex *A,
int lda,
cuComplex *C,
int ldc);
cusparseStatus_t CUSPARSEAPI cusparseZtranspose(
cusparseHandle_t handle,
cusparseOperation_t transa,
int m,
int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A,
int lda,
cuDoubleComplex *C,
int ldc);
cusparseStatus_t CUSPARSEAPI cusparseScsrmv_binary(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const float *alpha,
const cusparseMatDescr_t descra,
const int *csrRowPtr,
const int *csrColInd,
const float *x,
const float *beta,
float *y);
cusparseStatus_t CUSPARSEAPI cusparseDcsrmv_binary(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const double *alpha,
const cusparseMatDescr_t descra,
const int *csrRowPtr,
const int *csrColInd,
const double *x,
const double *beta,
double *y);
cusparseStatus_t CUSPARSEAPI cusparseCcsrmv_binary(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const cuComplex *alpha,
const cusparseMatDescr_t descra,
const int *csrRowPtr,
const int *csrColInd,
const cuComplex *x,
const cuComplex *beta,
cuComplex *y);
cusparseStatus_t CUSPARSEAPI cusparseZcsrmv_binary(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const cuDoubleComplex *alpha,
const cusparseMatDescr_t descra,
const int *csrRowPtr,
const int *csrColInd,
const cuDoubleComplex *x,
const cuDoubleComplex *beta,
cuDoubleComplex *y);
cusparseStatus_t CUSPARSEAPI cusparseCreateCsrmmInfo(
csrmmInfo_t *info);
cusparseStatus_t CUSPARSEAPI cusparseDestroyCsrmmInfo(
csrmmInfo_t info);
cusparseStatus_t CUSPARSEAPI csrmm4_analysis(
cusparseHandle_t handle,
int m, // number of rows of A
int k, // number of columns of A
int nnzA, // number of nonzeros of A
const cusparseMatDescr_t descrA,
const int *csrRowPtrA, // <int> m+1
const int *csrColIndA, // <int> nnzA
csrmmInfo_t info,
double *ratio // nnzB / nnzA
);
cusparseStatus_t CUSPARSEAPI cusparseScsrmm4(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnz,
const float *alpha,
const cusparseMatDescr_t descrA,
const float *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const float *B,
int ldb,
const float *beta,
float *C,
int ldc,
csrmmInfo_t info,
void *buffer);
cusparseStatus_t CUSPARSEAPI cusparseDcsrmm4(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnz,
const double *alpha,
const cusparseMatDescr_t descrA,
const double *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const double *B,
int ldb,
const double *beta,
double *C,
int ldc,
csrmmInfo_t info,
void *buffer);
cusparseStatus_t CUSPARSEAPI cusparseCcsrmm4(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnz,
const cuComplex *alpha,
const cusparseMatDescr_t descrA,
const cuComplex *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const cuComplex *B,
int ldb,
const cuComplex *beta,
cuComplex *C,
int ldc,
csrmmInfo_t info,
void *buffer);
cusparseStatus_t CUSPARSEAPI cusparseZcsrmm4(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnz,
const cuDoubleComplex *alpha,
const cusparseMatDescr_t descrA,
const cuDoubleComplex *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const cuDoubleComplex *B,
int ldb,
const cuDoubleComplex *beta,
cuDoubleComplex *C,
int ldc,
csrmmInfo_t info,
void *buffer);
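/*
 * Lifecycle sketch for the csrmm4 path (illustrative only). The required
 * size of the void *buffer argument is not exposed in this excerpt, so
 * d_buffer below is a hypothetical workspace whose size the caller must
 * determine elsewhere; all d_* names are placeholders.
 *
 *   csrmmInfo_t info;
 *   double ratio = 0.0; // nnzB / nnzA, reported by the analysis step
 *   status = cusparseCreateCsrmmInfo(&info);
 *   status = csrmm4_analysis(handle, m, k, nnzA, descrA,
 *                            d_csrRowPtrA, d_csrColIndA, info, &ratio);
 *   status = cusparseScsrmm4(handle, transa, transb, m, n, k, nnzA,
 *                            &alpha, descrA, d_csrValA, d_csrRowPtrA,
 *                            d_csrColIndA, d_B, ldb, &beta, d_C, ldc,
 *                            info, d_buffer);
 *   status = cusparseDestroyCsrmmInfo(info);
 */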
cusparseStatus_t CUSPARSEAPI cusparseScsrmm5(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnzA,
const float *alpha,
const cusparseMatDescr_t descrA,
const float *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const float *B,
int ldb,
const float *beta,
float *C,
int ldc
);
cusparseStatus_t CUSPARSEAPI cusparseDcsrmm5(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnzA,
const double *alpha,
const cusparseMatDescr_t descrA,
const double *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const double *B,
int ldb,
const double *beta,
double *C,
int ldc
);
cusparseStatus_t CUSPARSEAPI cusparseScsrmm6(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnzA,
const float *alpha,
const cusparseMatDescr_t descrA,
const float *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const float *B,
int ldb,
const float *beta,
float *C,
int ldc
);
cusparseStatus_t CUSPARSEAPI cusparseDcsrmm6(
cusparseHandle_t handle,
cusparseOperation_t transa,
cusparseOperation_t transb,
int m,
int n,
int k,
int nnzA,
const double *alpha,
const cusparseMatDescr_t descrA,
const double *csrValA,
const int *csrRowPtrA,
const int *csrColIndA,
const double *B,
int ldb,
const double *beta,
double *C,
int ldc
);
cusparseStatus_t CUSPARSEAPI cusparseSmax(
cusparseHandle_t handle,
int n,
const float *x,
float *valueHost,
float *work /* at least n+1 */
);
cusparseStatus_t CUSPARSEAPI cusparseDmax(
cusparseHandle_t handle,
int n,
const double *x,
double *valueHost,
double *work /* at least n+1 */
);
cusparseStatus_t CUSPARSEAPI cusparseSmin(
cusparseHandle_t handle,
int n,
const float *x,
float *valueHost,
float *work /* at least n+1 */
);
cusparseStatus_t CUSPARSEAPI cusparseDmin(
cusparseHandle_t handle,
int n,
const double *x,
double *valueHost,
double *work /* at least n+1 */
);
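/*
 * Usage sketch (illustrative only). Per the comments above, work must
 * provide at least n+1 elements; the valueHost name suggests the result
 * lands in host memory, and that reading is an assumption.
 *
 *   float maxVal = 0.0f;
 *   float *d_work; // at least n+1 floats
 *   cudaStat = cudaMalloc((void**)&d_work, (size_t)(n + 1) * sizeof(float));
 *   status = cusparseSmax(handle, n, d_x, &maxVal, d_work);
 *   assert(CUSPARSE_STATUS_SUCCESS == status);
 */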
cusparseStatus_t CUSPARSEAPI cusparseI16sort_internal_bufferSizeExt(
cusparseHandle_t handle,
int n,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseI16sort_internal(
cusparseHandle_t handle,
int num_bits, /* <= 16 */
int n,
unsigned short *key,
int *P,
int ascend,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseI32sort_internal_bufferSizeExt(
cusparseHandle_t handle,
int n,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseI32sort_internal(
cusparseHandle_t handle,
int num_bits, /* <= 32 */
int n,
unsigned int *key,
int *P,
int ascend,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseI64sort_internal_bufferSizeExt(
cusparseHandle_t handle,
int n,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseI64sort_internal(
cusparseHandle_t handle,
int num_bits, /* <= 64 */
int n,
unsigned long long *key,
int *P,
int ascend,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseIsort_bufferSizeExt(
cusparseHandle_t handle,
int n,
const int *key,
const int *P,
int ascend,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseIsort(
cusparseHandle_t handle,
int n,
int *key,
int *P,
int ascend,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseSsort_bufferSizeExt(
cusparseHandle_t handle,
int n,
const float *key,
const int *P,
int ascend,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseSsort(
cusparseHandle_t handle,
int n,
float *key,
int *P,
int ascend,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDsort_bufferSizeExt(
cusparseHandle_t handle,
int n,
const double *key,
const int *P,
int ascend,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseDsort(
cusparseHandle_t handle,
int n,
double *key,
int *P,
int ascend,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseHsort_bufferSizeExt(
cusparseHandle_t handle,
int n,
const __half *key,
const int *P,
int ascend,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseHsort(
cusparseHandle_t handle,
int n,
__half *key_fp16,
int *P,
int ascend,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseHsortsign_bufferSizeExt(
cusparseHandle_t handle,
int n,
const __half *key,
const int *P,
int ascend,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseSsortsign_bufferSizeExt(
cusparseHandle_t handle,
int n,
const float *key,
const int *P,
int ascend,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseDsortsign_bufferSizeExt(
cusparseHandle_t handle,
int n,
const double *key,
const int *P,
int ascend,
size_t *pBufferSize);
cusparseStatus_t CUSPARSEAPI cusparseIsortsign_bufferSizeExt(
cusparseHandle_t handle,
int n,
const int *key,
const int *P,
int ascend,
size_t *pBufferSize);
//#if defined(__cplusplus)
cusparseStatus_t CUSPARSEAPI cusparseHsortsign(
cusparseHandle_t handle,
int n,
__half *key,
int *P,
int ascend,
int *h_nnz_bucket0, /* host */
void *pBuffer);
//#endif
cusparseStatus_t CUSPARSEAPI cusparseSsortsign(
cusparseHandle_t handle,
int n,
float *key,
int *P,
int ascend,
int *h_nnz_bucket0, /* host */
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDsortsign(
cusparseHandle_t handle,
int n,
double *key,
int *P,
int ascend,
int *h_nnz_bucket0, /* host */
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseIsortsign(
cusparseHandle_t handle,
int n,
int *key,
int *P,
int ascend,
int *h_nnz_bucket0, /* host */
void *pBuffer);
//----------------------------------------------
cusparseStatus_t CUSPARSEAPI cusparseDDcsrMv_hyb(
cusparseHandle_t handle,
cusparseOperation_t trans,
int m,
int n,
int nnz,
const double *alpha,
const cusparseMatDescr_t descra,
const double *csrVal,
const int *csrRowPtr,
const int *csrColInd,
const double *x,
const double *beta,
double *y);
/*
* gtsv2Batch: cuThomas algorithm
* gtsv3Batch: QR
* gtsv4Batch: LU with partial pivoting
*/
cusparseStatus_t CUSPARSEAPI cusparseSgtsv2Batch(
cusparseHandle_t handle,
int n,
float *dl,
float *d,
float *du,
float *x,
int batchCount);
cusparseStatus_t CUSPARSEAPI cusparseDgtsv2Batch(
cusparseHandle_t handle,
int n,
double *dl,
double *d,
double *du,
double *x,
int batchCount);
cusparseStatus_t CUSPARSEAPI cusparseCgtsv2Batch(
cusparseHandle_t handle,
int n,
cuComplex *dl,
cuComplex *d,
cuComplex *du,
cuComplex *x,
int batchCount);
cusparseStatus_t CUSPARSEAPI cusparseZgtsv2Batch(
cusparseHandle_t handle,
int n,
cuDoubleComplex *dl,
cuDoubleComplex *d,
cuDoubleComplex *du,
cuDoubleComplex *x,
int batchCount);
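// Illustrative usage sketch (not part of the original header): each
// gtsv*Batch call solves batchCount independent tridiagonal systems of size
// n, overwriting x with the solution; dl, d, du, and x are assumed to be
// device arrays holding the batched diagonals and right-hand sides. Note
// that the cuThomas variant takes no workspace parameter.
//
//   cusparseSgtsv2Batch(handle, n, d_dl, d_d, d_du, d_x, batchCount);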
cusparseStatus_t CUSPARSEAPI cusparseSgtsv3Batch_bufferSizeExt(
cusparseHandle_t handle,
int n,
const float *dl,
const float *d,
const float *du,
const float *x,
int batchSize,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseDgtsv3Batch_bufferSizeExt(
cusparseHandle_t handle,
int n,
const double *dl,
const double *d,
const double *du,
const double *x,
int batchSize,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseCgtsv3Batch_bufferSizeExt(
cusparseHandle_t handle,
int n,
const cuComplex *dl,
const cuComplex *d,
const cuComplex *du,
const cuComplex *x,
int batchSize,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseZgtsv3Batch_bufferSizeExt(
cusparseHandle_t handle,
int n,
const cuDoubleComplex *dl,
const cuDoubleComplex *d,
const cuDoubleComplex *du,
const cuDoubleComplex *x,
int batchSize,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseSgtsv3Batch(
cusparseHandle_t handle,
int n,
float *dl,
float *d,
float *du,
float *x,
int batchSize,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDgtsv3Batch(
cusparseHandle_t handle,
int n,
double *dl,
double *d,
double *du,
double *x,
int batchSize,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCgtsv3Batch(
cusparseHandle_t handle,
int n,
cuComplex *dl,
cuComplex *d,
cuComplex *du,
cuComplex *x,
int batchSize,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZgtsv3Batch(
cusparseHandle_t handle,
int n,
cuDoubleComplex *dl,
cuDoubleComplex *d,
cuDoubleComplex *du,
cuDoubleComplex *x,
int batchSize,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseSgtsv4Batch_bufferSizeExt(
cusparseHandle_t handle,
int n,
const float *dl,
const float *d,
const float *du,
const float *x,
int batchSize,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseDgtsv4Batch_bufferSizeExt(
cusparseHandle_t handle,
int n,
const double *dl,
const double *d,
const double *du,
const double *x,
int batchSize,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseCgtsv4Batch_bufferSizeExt(
cusparseHandle_t handle,
int n,
const cuComplex *dl,
const cuComplex *d,
const cuComplex *du,
const cuComplex *x,
int batchSize,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseZgtsv4Batch_bufferSizeExt(
cusparseHandle_t handle,
int n,
const cuDoubleComplex *dl,
const cuDoubleComplex *d,
const cuDoubleComplex *du,
const cuDoubleComplex *x,
int batchSize,
size_t *pBufferSizeInBytes);
cusparseStatus_t CUSPARSEAPI cusparseSgtsv4Batch(
cusparseHandle_t handle,
int n,
float *dl,
float *d,
float *du,
float *x,
int batchSize,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseDgtsv4Batch(
cusparseHandle_t handle,
int n,
double *dl,
double *d,
double *du,
double *x,
int batchSize,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseCgtsv4Batch(
cusparseHandle_t handle,
int n,
cuComplex *dl,
cuComplex *d,
cuComplex *du,
cuComplex *x,
int batchSize,
void *pBuffer);
cusparseStatus_t CUSPARSEAPI cusparseZgtsv4Batch(
cusparseHandle_t handle,
int n,
cuDoubleComplex *dl,
cuDoubleComplex *d,
cuDoubleComplex *du,
cuDoubleComplex *x,
int batchSize,
void *pBuffer);
#if defined(__cplusplus)
}
#endif /* __cplusplus */
#endif /* CUSPARSE_INTERNAL_H_ */
File: rapidsai_public_repos/nvgraph/external/cub_semiring/util_arch.cuh

/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Static architectural properties by SM version.
*/
#pragma once
#include "util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
#if (__CUDACC_VER_MAJOR__ >= 9) && !defined(CUB_USE_COOPERATIVE_GROUPS)
#define CUB_USE_COOPERATIVE_GROUPS
#endif
/// CUB_PTX_ARCH reflects the PTX version targeted by the active compiler pass (or zero during the host pass).
#ifndef CUB_PTX_ARCH
#ifndef __CUDA_ARCH__
#define CUB_PTX_ARCH 0
#else
#define CUB_PTX_ARCH __CUDA_ARCH__
#endif
#endif
/// Whether or not the source targeted by the active compiler pass is allowed to invoke device kernels or methods from the CUDA runtime API.
#ifndef CUB_RUNTIME_FUNCTION
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__>= 350 && defined(__CUDACC_RDC__))
#define CUB_RUNTIME_ENABLED
#define CUB_RUNTIME_FUNCTION __host__ __device__
#else
#define CUB_RUNTIME_FUNCTION __host__
#endif
#endif
/// Number of threads per warp
#ifndef CUB_LOG_WARP_THREADS
#define CUB_LOG_WARP_THREADS(arch) \
(5)
#define CUB_WARP_THREADS(arch) \
(1 << CUB_LOG_WARP_THREADS(arch))
#define CUB_PTX_WARP_THREADS CUB_WARP_THREADS(CUB_PTX_ARCH)
#define CUB_PTX_LOG_WARP_THREADS CUB_LOG_WARP_THREADS(CUB_PTX_ARCH)
#endif
/// Number of smem banks
#ifndef CUB_LOG_SMEM_BANKS
#define CUB_LOG_SMEM_BANKS(arch) \
((arch >= 200) ? \
(5) : \
(4))
#define CUB_SMEM_BANKS(arch) \
(1 << CUB_LOG_SMEM_BANKS(arch))
#define CUB_PTX_LOG_SMEM_BANKS CUB_LOG_SMEM_BANKS(CUB_PTX_ARCH)
#define CUB_PTX_SMEM_BANKS CUB_SMEM_BANKS(CUB_PTX_ARCH)
#endif
/// Oversubscription factor
#ifndef CUB_SUBSCRIPTION_FACTOR
#define CUB_SUBSCRIPTION_FACTOR(arch) \
((arch >= 300) ? \
(5) : \
((arch >= 200) ? \
(3) : \
(10)))
#define CUB_PTX_SUBSCRIPTION_FACTOR CUB_SUBSCRIPTION_FACTOR(CUB_PTX_ARCH)
#endif
/// Prefer padding overhead vs X-way conflicts greater than this threshold
#ifndef CUB_PREFER_CONFLICT_OVER_PADDING
#define CUB_PREFER_CONFLICT_OVER_PADDING(arch) \
((arch >= 300) ? \
(1) : \
(4))
#define CUB_PTX_PREFER_CONFLICT_OVER_PADDING CUB_PREFER_CONFLICT_OVER_PADDING(CUB_PTX_ARCH)
#endif
/// Scale down the number of warps to keep the same amount of "tile" storage as the nominal configuration for 4B data. Minimum of two warps.
#ifndef CUB_BLOCK_THREADS
#define CUB_BLOCK_THREADS(NOMINAL_4B_BLOCK_THREADS, T, PTX_ARCH) \
(CUB_MIN( \
NOMINAL_4B_BLOCK_THREADS * 2, \
CUB_WARP_THREADS(PTX_ARCH) * CUB_MAX( \
(NOMINAL_4B_BLOCK_THREADS / CUB_WARP_THREADS(PTX_ARCH)) * 3 / 4, \
(NOMINAL_4B_BLOCK_THREADS / CUB_WARP_THREADS(PTX_ARCH)) * 4 / sizeof(T))))
#endif
/// Scale the number of items per thread up or down to keep the same amount of "tile" storage as the nominal configuration for 4B data. Minimum of 1 item per thread.
#ifndef CUB_ITEMS_PER_THREAD
#define CUB_ITEMS_PER_THREAD(NOMINAL_4B_ITEMS_PER_THREAD, NOMINAL_4B_BLOCK_THREADS, T, PTX_ARCH) \
(CUB_MIN( \
NOMINAL_4B_ITEMS_PER_THREAD * 2, \
CUB_MAX( \
1, \
(NOMINAL_4B_ITEMS_PER_THREAD * NOMINAL_4B_BLOCK_THREADS * 4 / sizeof(T)) / CUB_BLOCK_THREADS(NOMINAL_4B_BLOCK_THREADS, T, PTX_ARCH))))
#endif
/// Define both nominal threads-per-block and items-per-thread
#ifndef CUB_NOMINAL_CONFIG
#define CUB_NOMINAL_CONFIG(NOMINAL_4B_BLOCK_THREADS, NOMINAL_4B_ITEMS_PER_THREAD, T) \
CUB_BLOCK_THREADS(NOMINAL_4B_BLOCK_THREADS, T, 200), \
CUB_ITEMS_PER_THREAD(NOMINAL_4B_ITEMS_PER_THREAD, NOMINAL_4B_BLOCK_THREADS, T, 200)
#endif
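// Worked example (illustrative, not from the original source): with the
// nominal 4B configuration of 128 threads x 4 items,
// CUB_NOMINAL_CONFIG(128, 4, double) rescales the tile for 8B doubles:
//   CUB_BLOCK_THREADS(128, double, 200)       -> 96 threads per block
//   CUB_ITEMS_PER_THREAD(4, 128, double, 200) -> 2 items per thread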
#endif // Do not document
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
File: rapidsai_public_repos/nvgraph/external/cub_semiring/cub.cuh

/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* CUB umbrella include file
*/
#pragma once
// Block
#include "block/block_histogram.cuh"
#include "block/block_discontinuity.cuh"
#include "block/block_exchange.cuh"
#include "block/block_load.cuh"
#include "block/block_radix_rank.cuh"
#include "block/block_radix_sort.cuh"
#include "block/block_reduce.cuh"
#include "block/block_scan.cuh"
#include "block/block_store.cuh"
//#include "block/block_shift.cuh"
// Device
#include "device/device_histogram.cuh"
#include "device/device_partition.cuh"
#include "device/device_radix_sort.cuh"
#include "device/device_reduce.cuh"
#include "device/device_run_length_encode.cuh"
#include "device/device_scan.cuh"
#include "device/device_segmented_radix_sort.cuh"
#include "device/device_segmented_reduce.cuh"
#include "device/device_select.cuh"
#include "device/device_spmv.cuh"
// Grid
//#include "grid/grid_barrier.cuh"
#include "grid/grid_even_share.cuh"
#include "grid/grid_mapping.cuh"
#include "grid/grid_queue.cuh"
// Thread
#include "thread/thread_load.cuh"
#include "thread/thread_operators.cuh"
#include "thread/thread_reduce.cuh"
#include "thread/thread_scan.cuh"
#include "thread/thread_store.cuh"
// Warp
#include "warp/warp_reduce.cuh"
#include "warp/warp_scan.cuh"
// Iterator
#include "iterator/arg_index_input_iterator.cuh"
#include "iterator/cache_modified_input_iterator.cuh"
#include "iterator/cache_modified_output_iterator.cuh"
#include "iterator/constant_input_iterator.cuh"
#include "iterator/counting_input_iterator.cuh"
#include "iterator/tex_obj_input_iterator.cuh"
#include "iterator/tex_ref_input_iterator.cuh"
#include "iterator/transform_input_iterator.cuh"
// Util
#include "util_arch.cuh"
#include "util_debug.cuh"
#include "util_device.cuh"
#include "util_macro.cuh"
#include "util_ptx.cuh"
#include "util_type.cuh"
File: rapidsai_public_repos/nvgraph/external/cub_semiring/util_namespace.cuh

/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Place-holder for prefixing the cub namespace
*/
#pragma once
// Wrap the cub namespace in an outer cub_semiring namespace:
#define CUB_NS_PREFIX namespace cub_semiring {
#define CUB_NS_POSTFIX }
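// With the definitions above, every symbol in this vendored copy lives in
// the cub_semiring::cub namespace (each header opens "namespace cub" between
// CUB_NS_PREFIX and CUB_NS_POSTFIX), keeping it distinct from any stock CUB
// that may also be present in the build.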
#ifndef CUB_NS_PREFIX
#define CUB_NS_PREFIX
#endif
#ifndef CUB_NS_POSTFIX
#define CUB_NS_POSTFIX
#endif
File: rapidsai_public_repos/nvgraph/external/cub_semiring/util_ptx.cuh

/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* PTX intrinsics
*/
#pragma once
#include "util_type.cuh"
#include "util_arch.cuh"
#include "util_namespace.cuh"
#include "util_debug.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilPtx
* @{
*/
/******************************************************************************
* PTX helper macros
******************************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* Register modifier for pointer-types (for inlining PTX assembly)
*/
#if defined(_WIN64) || defined(__LP64__)
#define __CUB_LP64__ 1
// 64-bit register modifier for inlined asm
#define _CUB_ASM_PTR_ "l"
#define _CUB_ASM_PTR_SIZE_ "u64"
#else
#define __CUB_LP64__ 0
// 32-bit register modifier for inlined asm
#define _CUB_ASM_PTR_ "r"
#define _CUB_ASM_PTR_SIZE_ "u32"
#endif
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Inlined PTX intrinsics
******************************************************************************/
/**
* \brief Shift-right then add. Returns (\p x >> \p shift) + \p addend.
*/
__device__ __forceinline__ unsigned int SHR_ADD(
unsigned int x,
unsigned int shift,
unsigned int addend)
{
unsigned int ret;
#if CUB_PTX_ARCH >= 200
asm ("vshr.u32.u32.u32.clamp.add %0, %1, %2, %3;" :
"=r"(ret) : "r"(x), "r"(shift), "r"(addend));
#else
ret = (x >> shift) + addend;
#endif
return ret;
}
/**
* \brief Shift-left then add. Returns (\p x << \p shift) + \p addend.
*/
__device__ __forceinline__ unsigned int SHL_ADD(
unsigned int x,
unsigned int shift,
unsigned int addend)
{
unsigned int ret;
#if CUB_PTX_ARCH >= 200
asm ("vshl.u32.u32.u32.clamp.add %0, %1, %2, %3;" :
"=r"(ret) : "r"(x), "r"(shift), "r"(addend));
#else
ret = (x << shift) + addend;
#endif
return ret;
}
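// Worked examples (illustrative): SHR_ADD(20, 2, 3) yields (20 >> 2) + 3 == 8,
// and SHL_ADD(5, 1, 2) yields (5 << 1) + 2 == 12.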
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* Bitfield-extract.
*/
template <typename UnsignedBits, int BYTE_LEN>
__device__ __forceinline__ unsigned int BFE(
UnsignedBits source,
unsigned int bit_start,
unsigned int num_bits,
Int2Type<BYTE_LEN> /*byte_len*/)
{
unsigned int bits;
#if CUB_PTX_ARCH >= 200
asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"((unsigned int) source), "r"(bit_start), "r"(num_bits));
#else
const unsigned int MASK = (1 << num_bits) - 1;
bits = (source >> bit_start) & MASK;
#endif
return bits;
}
/**
* Bitfield-extract for 64-bit types.
*/
template <typename UnsignedBits>
__device__ __forceinline__ unsigned int BFE(
UnsignedBits source,
unsigned int bit_start,
unsigned int num_bits,
Int2Type<8> /*byte_len*/)
{
const unsigned long long MASK = (1ull << num_bits) - 1;
return (source >> bit_start) & MASK;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Bitfield-extract. Extracts \p num_bits from \p source starting at bit-offset \p bit_start. The input \p source may be an 8b, 16b, 32b, or 64b unsigned integer type.
*/
template <typename UnsignedBits>
__device__ __forceinline__ unsigned int BFE(
UnsignedBits source,
unsigned int bit_start,
unsigned int num_bits)
{
return BFE(source, bit_start, num_bits, Int2Type<sizeof(UnsignedBits)>());
}
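// Worked example (illustrative): BFE(0xA4u, 2, 4) extracts the four bits
// starting at bit 2 of 0xA4 (binary 10100100), yielding 0x9 (binary 1001).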
/**
* \brief Bitfield insert. Inserts the \p num_bits least significant bits of \p y into \p x at bit-offset \p bit_start.
*/
__device__ __forceinline__ void BFI(
unsigned int &ret,
unsigned int x,
unsigned int y,
unsigned int bit_start,
unsigned int num_bits)
{
#if CUB_PTX_ARCH >= 200
asm ("bfi.b32 %0, %1, %2, %3, %4;" :
"=r"(ret) : "r"(y), "r"(x), "r"(bit_start), "r"(num_bits));
#else
    // Emulate the documented semantics (and the PTX path above): insert the
    // num_bits least-significant bits of y into x at bit-offset bit_start.
    unsigned int MASK = ((1 << num_bits) - 1) << bit_start;
    ret = (x & ~MASK) | ((y << bit_start) & MASK);
#endif
}
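// Worked example (illustrative, per the documented semantics above):
//   unsigned int ret;
//   BFI(ret, 0xFF00u, 0x5u, 4, 4); // replace bits [4..7] of 0xFF00 with 0x5
//   // ret == 0xFF50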
/**
* \brief Three-operand add. Returns \p x + \p y + \p z.
*/
__device__ __forceinline__ unsigned int IADD3(unsigned int x, unsigned int y, unsigned int z)
{
#if CUB_PTX_ARCH >= 200
asm ("vadd.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(x) : "r"(x), "r"(y), "r"(z));
#else
x = x + y + z;
#endif
return x;
}
/**
* \brief Byte-permute. Pick four arbitrary bytes from two 32-bit registers, and reassemble them into a 32-bit destination register. For SM2.0 or later.
*
* \par
* The bytes in the two source registers \p a and \p b are numbered from 0 to 7:
* {\p b, \p a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}. For each of the four bytes
* {b3, b2, b1, b0} selected in the return value, a 4-bit selector is defined within
* the four lower "nibbles" of \p index: {\p index } = {n7, n6, n5, n4, n3, n2, n1, n0}
*
* \par Snippet
* The code snippet below illustrates byte-permute.
* \par
* \code
* #include <cub/cub.cuh>
*
* __global__ void ExampleKernel(...)
* {
* int a = 0x03020100;
* int b = 0x07060504;
* int index = 0x00007531;
*
* int selected = PRMT(a, b, index); // 0x07050301
*
* \endcode
*
*/
__device__ __forceinline__ int PRMT(unsigned int a, unsigned int b, unsigned int index)
{
int ret;
asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(a), "r"(b), "r"(index));
return ret;
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* Sync-threads barrier.
*/
__device__ __forceinline__ void BAR(int count)
{
asm volatile("bar.sync 1, %0;" : : "r"(count));
}
/**
* CTA barrier
*/
__device__ __forceinline__ void CTA_SYNC()
{
__syncthreads();
}
/**
* CTA barrier with predicate
*/
__device__ __forceinline__ int CTA_SYNC_AND(int p)
{
return __syncthreads_and(p);
}
/**
* Warp barrier
*/
__device__ __forceinline__ void WARP_SYNC(unsigned int member_mask)
{
#ifdef CUB_USE_COOPERATIVE_GROUPS
__syncwarp(member_mask);
#endif
}
/**
* Warp any
*/
__device__ __forceinline__ int WARP_ANY(int predicate, unsigned int member_mask)
{
#ifdef CUB_USE_COOPERATIVE_GROUPS
return __any_sync(member_mask, predicate);
#else
return ::__any(predicate);
#endif
}
/**
 * Warp all
*/
__device__ __forceinline__ int WARP_ALL(int predicate, unsigned int member_mask)
{
#ifdef CUB_USE_COOPERATIVE_GROUPS
return __all_sync(member_mask, predicate);
#else
return ::__all(predicate);
#endif
}
/**
* Warp ballot
*/
__device__ __forceinline__ int WARP_BALLOT(int predicate, unsigned int member_mask)
{
#ifdef CUB_USE_COOPERATIVE_GROUPS
return __ballot_sync(member_mask, predicate);
#else
return __ballot(predicate);
#endif
}
/**
* Warp synchronous shfl_up
*/
__device__ __forceinline__
unsigned int SHFL_UP_SYNC(unsigned int word, int src_offset, int first_lane, unsigned int member_mask)
{
#ifdef CUB_USE_COOPERATIVE_GROUPS
asm volatile("shfl.sync.up.b32 %0, %1, %2, %3, %4;"
: "=r"(word) : "r"(word), "r"(src_offset), "r"(first_lane), "r"(member_mask));
#else
asm volatile("shfl.up.b32 %0, %1, %2, %3;"
: "=r"(word) : "r"(word), "r"(src_offset), "r"(first_lane));
#endif
return word;
}
/**
* Warp synchronous shfl_down
*/
__device__ __forceinline__
unsigned int SHFL_DOWN_SYNC(unsigned int word, int src_offset, int last_lane, unsigned int member_mask)
{
#ifdef CUB_USE_COOPERATIVE_GROUPS
asm volatile("shfl.sync.down.b32 %0, %1, %2, %3, %4;"
: "=r"(word) : "r"(word), "r"(src_offset), "r"(last_lane), "r"(member_mask));
#else
asm volatile("shfl.down.b32 %0, %1, %2, %3;"
: "=r"(word) : "r"(word), "r"(src_offset), "r"(last_lane));
#endif
return word;
}
/**
* Warp synchronous shfl_idx
*/
__device__ __forceinline__
unsigned int SHFL_IDX_SYNC(unsigned int word, int src_lane, int last_lane, unsigned int member_mask)
{
#ifdef CUB_USE_COOPERATIVE_GROUPS
asm volatile("shfl.sync.idx.b32 %0, %1, %2, %3, %4;"
: "=r"(word) : "r"(word), "r"(src_lane), "r"(last_lane), "r"(member_mask));
#else
asm volatile("shfl.idx.b32 %0, %1, %2, %3;"
: "=r"(word) : "r"(word), "r"(src_lane), "r"(last_lane));
#endif
return word;
}
/**
* Floating point multiply. (Mantissa LSB rounds towards zero.)
*/
__device__ __forceinline__ float FMUL_RZ(float a, float b)
{
float d;
asm ("mul.rz.f32 %0, %1, %2;" : "=f"(d) : "f"(a), "f"(b));
return d;
}
/**
* Floating point multiply-add. (Mantissa LSB rounds towards zero.)
*/
__device__ __forceinline__ float FFMA_RZ(float a, float b, float c)
{
float d;
asm ("fma.rz.f32 %0, %1, %2, %3;" : "=f"(d) : "f"(a), "f"(b), "f"(c));
return d;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Terminates the calling thread
*/
__device__ __forceinline__ void ThreadExit() {
asm volatile("exit;");
}
/**
* \brief Abort execution and generate an interrupt to the host CPU
*/
__device__ __forceinline__ void ThreadTrap() {
asm volatile("trap;");
}
/**
* \brief Returns the row-major linear thread identifier for a multidimensional thread block
*/
__device__ __forceinline__ int RowMajorTid(int block_dim_x, int block_dim_y, int block_dim_z)
{
return ((block_dim_z == 1) ? 0 : (threadIdx.z * block_dim_x * block_dim_y)) +
((block_dim_y == 1) ? 0 : (threadIdx.y * block_dim_x)) +
threadIdx.x;
}
/**
* \brief Returns the warp lane ID of the calling thread
*/
__device__ __forceinline__ unsigned int LaneId()
{
unsigned int ret;
asm ("mov.u32 %0, %%laneid;" : "=r"(ret) );
return ret;
}
/**
* \brief Returns the warp ID of the calling thread. Warp ID is guaranteed to be unique among warps, but may not correspond to a zero-based ranking within the thread block.
*/
__device__ __forceinline__ unsigned int WarpId()
{
unsigned int ret;
asm ("mov.u32 %0, %%warpid;" : "=r"(ret) );
return ret;
}
/**
* \brief Returns the warp lane mask of all lanes less than the calling thread
*/
__device__ __forceinline__ unsigned int LaneMaskLt()
{
unsigned int ret;
asm ("mov.u32 %0, %%lanemask_lt;" : "=r"(ret) );
return ret;
}
/**
* \brief Returns the warp lane mask of all lanes less than or equal to the calling thread
*/
__device__ __forceinline__ unsigned int LaneMaskLe()
{
unsigned int ret;
asm ("mov.u32 %0, %%lanemask_le;" : "=r"(ret) );
return ret;
}
/**
* \brief Returns the warp lane mask of all lanes greater than the calling thread
*/
__device__ __forceinline__ unsigned int LaneMaskGt()
{
unsigned int ret;
asm ("mov.u32 %0, %%lanemask_gt;" : "=r"(ret) );
return ret;
}
/**
* \brief Returns the warp lane mask of all lanes greater than or equal to the calling thread
*/
__device__ __forceinline__ unsigned int LaneMaskGe()
{
unsigned int ret;
asm ("mov.u32 %0, %%lanemask_ge;" : "=r"(ret) );
return ret;
}
/** @} */ // end group UtilPtx
/**
* \brief Shuffle-up for any data type. Each <em>warp-lane<sub>i</sub></em> obtains the value \p input contributed by <em>warp-lane</em><sub><em>i</em>-<tt>src_offset</tt></sub>. For thread lanes \e i < src_offset, the thread's own \p input is returned to the thread. ![](shfl_up_logo.png)
* \ingroup WarpModule
*
* \par
* - Available only for SM3.0 or newer
*
* \par Snippet
* The code snippet below illustrates each thread obtaining a \p double value from the
* predecessor of its predecessor.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/util_ptx.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Obtain one input item per thread
* double thread_data = ...
*
* // Obtain item from two ranks below
* double peer_data = ShuffleUp(thread_data, 2, 0, 0xffffffff);
*
* \endcode
* \par
* Suppose the set of input \p thread_data across the first warp of threads is <tt>{1.0, 2.0, 3.0, 4.0, 5.0, ..., 32.0}</tt>.
* The corresponding output \p peer_data will be <tt>{1.0, 2.0, 1.0, 2.0, 3.0, ..., 30.0}</tt>.
*
*/
template <typename T>
__device__ __forceinline__ T ShuffleUp(
T input, ///< [in] The value to broadcast
int src_offset, ///< [in] The relative down-offset of the peer to read from
int first_lane, ///< [in] Index of first lane in segment (typically 0)
unsigned int member_mask) ///< [in] 32-bit mask of participating warp lanes
{
typedef typename UnitWord<T>::ShuffleWord ShuffleWord;
const int WORDS = (sizeof(T) + sizeof(ShuffleWord) - 1) / sizeof(ShuffleWord);
T output;
ShuffleWord *output_alias = reinterpret_cast<ShuffleWord *>(&output);
ShuffleWord *input_alias = reinterpret_cast<ShuffleWord *>(&input);
unsigned int shuffle_word;
shuffle_word = SHFL_UP_SYNC((unsigned int)input_alias[0], src_offset, first_lane, member_mask);
output_alias[0] = shuffle_word;
#pragma unroll
for (int WORD = 1; WORD < WORDS; ++WORD)
{
shuffle_word = SHFL_UP_SYNC((unsigned int)input_alias[WORD], src_offset, first_lane, member_mask);
output_alias[WORD] = shuffle_word;
}
return output;
}
/**
 * \brief Shuffle-down for any data type. Each <em>warp-lane<sub>i</sub></em> obtains the value \p input contributed by <em>warp-lane</em><sub><em>i</em>+<tt>src_offset</tt></sub>. For thread lanes \e i + \p src_offset >= WARP_THREADS, the thread's own \p input is returned to the thread. ![](shfl_down_logo.png)
* \ingroup WarpModule
*
* \par
* - Available only for SM3.0 or newer
*
* \par Snippet
* The code snippet below illustrates each thread obtaining a \p double value from the
* successor of its successor.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/util_ptx.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Obtain one input item per thread
* double thread_data = ...
*
 * // Obtain item from two ranks above
* double peer_data = ShuffleDown(thread_data, 2, 31, 0xffffffff);
*
* \endcode
* \par
* Suppose the set of input \p thread_data across the first warp of threads is <tt>{1.0, 2.0, 3.0, 4.0, 5.0, ..., 32.0}</tt>.
* The corresponding output \p peer_data will be <tt>{3.0, 4.0, 5.0, 6.0, 7.0, ..., 32.0}</tt>.
*
*/
template <typename T>
__device__ __forceinline__ T ShuffleDown(
T input, ///< [in] The value to broadcast
int src_offset, ///< [in] The relative up-offset of the peer to read from
    int last_lane, ///< [in] Index of last lane in segment (typically 31)
unsigned int member_mask) ///< [in] 32-bit mask of participating warp lanes
{
typedef typename UnitWord<T>::ShuffleWord ShuffleWord;
const int WORDS = (sizeof(T) + sizeof(ShuffleWord) - 1) / sizeof(ShuffleWord);
T output;
ShuffleWord *output_alias = reinterpret_cast<ShuffleWord *>(&output);
ShuffleWord *input_alias = reinterpret_cast<ShuffleWord *>(&input);
unsigned int shuffle_word;
shuffle_word = SHFL_DOWN_SYNC((unsigned int)input_alias[0], src_offset, last_lane, member_mask);
output_alias[0] = shuffle_word;
#pragma unroll
for (int WORD = 1; WORD < WORDS; ++WORD)
{
shuffle_word = SHFL_DOWN_SYNC((unsigned int)input_alias[WORD], src_offset, last_lane, member_mask);
output_alias[WORD] = shuffle_word;
}
return output;
}
/**
* \brief Shuffle-broadcast for any data type. Each <em>warp-lane<sub>i</sub></em> obtains the value \p input
* contributed by <em>warp-lane</em><sub><tt>src_lane</tt></sub>. For \p src_lane < 0 or \p src_lane >= WARP_THREADS,
 * the thread's own \p input is returned to the thread. ![](shfl_broadcast_logo.png)
*
* \ingroup WarpModule
*
* \par
* - Available only for SM3.0 or newer
*
* \par Snippet
* The code snippet below illustrates each thread obtaining a \p double value from <em>warp-lane</em><sub>0</sub>.
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/util_ptx.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Obtain one input item per thread
* double thread_data = ...
*
* // Obtain item from thread 0
* double peer_data = ShuffleIndex(thread_data, 0, 32, 0xffffffff);
*
* \endcode
* \par
* Suppose the set of input \p thread_data across the first warp of threads is <tt>{1.0, 2.0, 3.0, 4.0, 5.0, ..., 32.0}</tt>.
* The corresponding output \p peer_data will be <tt>{1.0, 1.0, 1.0, 1.0, 1.0, ..., 1.0}</tt>.
*
*/
template <typename T>
__device__ __forceinline__ T ShuffleIndex(
T input, ///< [in] The value to broadcast
int src_lane, ///< [in] Which warp lane is to do the broadcasting
int logical_warp_threads, ///< [in] Number of threads per logical warp
unsigned int member_mask) ///< [in] 32-bit mask of participating warp lanes
{
typedef typename UnitWord<T>::ShuffleWord ShuffleWord;
const int WORDS = (sizeof(T) + sizeof(ShuffleWord) - 1) / sizeof(ShuffleWord);
T output;
ShuffleWord *output_alias = reinterpret_cast<ShuffleWord *>(&output);
ShuffleWord *input_alias = reinterpret_cast<ShuffleWord *>(&input);
unsigned int shuffle_word;
shuffle_word = SHFL_IDX_SYNC((unsigned int)input_alias[0],
src_lane,
logical_warp_threads - 1,
member_mask);
output_alias[0] = shuffle_word;
#pragma unroll
for (int WORD = 1; WORD < WORDS; ++WORD)
{
shuffle_word = SHFL_IDX_SYNC((unsigned int)input_alias[WORD],
src_lane,
logical_warp_threads - 1,
member_mask);
output_alias[WORD] = shuffle_word;
}
return output;
}
/**
* Compute a 32b mask of threads having the same least-significant
* LABEL_BITS of \p label as the calling thread.
*/
template <int LABEL_BITS>
inline __device__ unsigned int MatchAny(unsigned int label)
{
unsigned int retval;
// Extract masks of common threads for each bit
#pragma unroll
for (int BIT = 0; BIT < LABEL_BITS; ++BIT)
{
unsigned int mask;
unsigned int current_bit = 1 << BIT;
asm ("{\n"
" .reg .pred p;\n"
" and.b32 %0, %1, %2;"
" setp.eq.u32 p, %0, %2;\n"
#ifdef CUB_USE_COOPERATIVE_GROUPS
" vote.ballot.sync.b32 %0, p, 0xffffffff;\n"
#else
" vote.ballot.b32 %0, p;\n"
#endif
" @!p not.b32 %0, %0;\n"
"}\n" : "=r"(mask) : "r"(label), "r"(current_bit));
// Remove peers who differ
retval = (BIT == 0) ? mask : retval & mask;
}
return retval;
// // VOLTA match
// unsigned int retval;
// asm ("{\n"
// " match.any.sync.b32 %0, %1, 0xffffffff;\n"
// "}\n" : "=r"(retval) : "r"(label));
// return retval;
}
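// Illustrative example (not from the original source): with LABEL_BITS == 2,
// lanes whose labels agree in their two least-significant bits (e.g. labels
// 0, 4, and 8) all receive the same mask, with exactly those lanes' bits set.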
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
File: rapidsai_public_repos/nvgraph/external/cub_semiring/util_type.cuh

/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Common type manipulation (metaprogramming) utilities
*/
#pragma once
#include <iostream>
#include <limits>
#include <cfloat>
#include "util_macro.cuh"
#include "util_arch.cuh"
#include "util_namespace.cuh"
#include "cuComplex.h"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilModule
* @{
*/
/******************************************************************************
 * Conditional types
******************************************************************************/
/**
* \brief Type selection (<tt>IF ? ThenType : ElseType</tt>)
*/
template <bool IF, typename ThenType, typename ElseType>
struct If
{
/// Conditional type result
typedef ThenType Type; // true
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename ThenType, typename ElseType>
struct If<false, ThenType, ElseType>
{
typedef ElseType Type; // false
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
 * Type equality
******************************************************************************/
/**
* \brief Type equality test
*/
template <typename A, typename B>
struct Equals
{
enum {
VALUE = 0,
NEGATE = 1
};
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename A>
struct Equals <A, A>
{
enum {
VALUE = 1,
NEGATE = 0
};
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Static math
******************************************************************************/
/**
* \brief Statically determine log2(N), rounded up.
*
* For example:
* Log2<8>::VALUE // 3
* Log2<3>::VALUE // 2
*/
template <int N, int CURRENT_VAL = N, int COUNT = 0>
struct Log2
{
/// Static logarithm value
enum { VALUE = Log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE }; // Inductive case
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <int N, int COUNT>
struct Log2<N, 0, COUNT>
{
enum {VALUE = (1 << (COUNT - 1) < N) ? // Base case
COUNT :
COUNT - 1 };
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Statically determine if N is a power-of-two
*/
template <int N>
struct PowerOfTwo
{
enum { VALUE = ((N & (N - 1)) == 0) };
};
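// For example (illustrative): PowerOfTwo<8>::VALUE == 1, PowerOfTwo<12>::VALUE == 0.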
/******************************************************************************
* Pointer vs. iterator detection
******************************************************************************/
/**
* \brief Pointer vs. iterator
*/
template <typename Tp>
struct IsPointer
{
enum { VALUE = 0 };
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename Tp>
struct IsPointer<Tp*>
{
enum { VALUE = 1 };
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Qualifier detection
******************************************************************************/
/**
* \brief Volatile modifier test
*/
template <typename Tp>
struct IsVolatile
{
enum { VALUE = 0 };
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename Tp>
struct IsVolatile<Tp volatile>
{
enum { VALUE = 1 };
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Qualifier removal
******************************************************************************/
/**
* \brief Removes \p const and \p volatile qualifiers from type \p Tp.
*
* For example:
* <tt>typename RemoveQualifiers<volatile int>::Type // int;</tt>
*/
template <typename Tp, typename Up = Tp>
struct RemoveQualifiers
{
/// Type without \p const and \p volatile qualifiers
typedef Up Type;
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename Tp, typename Up>
struct RemoveQualifiers<Tp, volatile Up>
{
typedef Up Type;
};
template <typename Tp, typename Up>
struct RemoveQualifiers<Tp, const Up>
{
typedef Up Type;
};
template <typename Tp, typename Up>
struct RemoveQualifiers<Tp, const volatile Up>
{
typedef Up Type;
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Marker types
******************************************************************************/
/**
* \brief A simple "NULL" marker type
*/
struct NullType
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename T>
__host__ __device__ __forceinline__ NullType& operator =(const T&) { return *this; }
__host__ __device__ __forceinline__ bool operator ==(const NullType&) { return true; }
__host__ __device__ __forceinline__ bool operator !=(const NullType&) { return false; }
#endif // DOXYGEN_SHOULD_SKIP_THIS
};
/**
* \brief Allows for the treatment of an integral constant as a type at compile-time (e.g., to achieve static call dispatch based on constant integral values)
*/
template <int A>
struct Int2Type
{
enum {VALUE = A};
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/******************************************************************************
* Size and alignment
******************************************************************************/
/// Structure alignment
template <typename T>
struct AlignBytes
{
struct Pad
{
T val;
char byte;
};
enum
{
/// The "true CUDA" alignment of T in bytes
ALIGN_BYTES = sizeof(Pad) - sizeof(T)
};
/// The "truly aligned" type
typedef T Type;
};
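// Worked example (illustrative, not from the original source): on typical
// platforms, for T = int the Pad struct {int val; char byte;} has
// sizeof(Pad) == 8, so ALIGN_BYTES == 8 - 4 == 4: the struct-padding trick
// recovers T's natural alignment.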
// Specializations where host C++ compilers (e.g., 32-bit Windows) may disagree
// with device C++ compilers (EDG) on types passed as template parameters through
// kernel functions
#define __CUB_ALIGN_BYTES(t, b) \
template <> struct AlignBytes<t> \
{ enum { ALIGN_BYTES = b }; typedef __align__(b) t Type; };
__CUB_ALIGN_BYTES(short4, 8)
__CUB_ALIGN_BYTES(ushort4, 8)
__CUB_ALIGN_BYTES(int2, 8)
__CUB_ALIGN_BYTES(uint2, 8)
__CUB_ALIGN_BYTES(long long, 8)
__CUB_ALIGN_BYTES(unsigned long long, 8)
__CUB_ALIGN_BYTES(float2, 8)
__CUB_ALIGN_BYTES(double, 8)
#ifdef _WIN32
__CUB_ALIGN_BYTES(long2, 8)
__CUB_ALIGN_BYTES(ulong2, 8)
#else
__CUB_ALIGN_BYTES(long2, 16)
__CUB_ALIGN_BYTES(ulong2, 16)
#endif
__CUB_ALIGN_BYTES(int4, 16)
__CUB_ALIGN_BYTES(uint4, 16)
__CUB_ALIGN_BYTES(float4, 16)
__CUB_ALIGN_BYTES(long4, 16)
__CUB_ALIGN_BYTES(ulong4, 16)
__CUB_ALIGN_BYTES(longlong2, 16)
__CUB_ALIGN_BYTES(ulonglong2, 16)
__CUB_ALIGN_BYTES(double2, 16)
__CUB_ALIGN_BYTES(longlong4, 16)
__CUB_ALIGN_BYTES(ulonglong4, 16)
__CUB_ALIGN_BYTES(double4, 16)
template <typename T> struct AlignBytes<volatile T> : AlignBytes<T> {};
template <typename T> struct AlignBytes<const T> : AlignBytes<T> {};
template <typename T> struct AlignBytes<const volatile T> : AlignBytes<T> {};
/// Unit-words of data movement
template <typename T>
struct UnitWord
{
enum {
ALIGN_BYTES = AlignBytes<T>::ALIGN_BYTES
};
template <typename Unit>
struct IsMultiple
{
enum {
UNIT_ALIGN_BYTES = AlignBytes<Unit>::ALIGN_BYTES,
IS_MULTIPLE = (sizeof(T) % sizeof(Unit) == 0) && (ALIGN_BYTES % UNIT_ALIGN_BYTES == 0)
};
};
/// Biggest shuffle word that T is a whole multiple of and is not larger than the alignment of T
typedef typename If<IsMultiple<int>::IS_MULTIPLE,
unsigned int,
typename If<IsMultiple<short>::IS_MULTIPLE,
unsigned short,
unsigned char>::Type>::Type ShuffleWord;
/// Biggest volatile word that T is a whole multiple of and is not larger than the alignment of T
typedef typename If<IsMultiple<long long>::IS_MULTIPLE,
unsigned long long,
ShuffleWord>::Type VolatileWord;
/// Biggest memory-access word that T is a whole multiple of and is not larger than the alignment of T
typedef typename If<IsMultiple<longlong2>::IS_MULTIPLE,
ulonglong2,
VolatileWord>::Type DeviceWord;
/// Biggest texture reference word that T is a whole multiple of and is not larger than the alignment of T
typedef typename If<IsMultiple<int4>::IS_MULTIPLE,
uint4,
typename If<IsMultiple<int2>::IS_MULTIPLE,
uint2,
ShuffleWord>::Type>::Type TextureWord;
};
// float2 specialization workaround (for SM10-SM13)
template <>
struct UnitWord <float2>
{
typedef int ShuffleWord;
#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130)
typedef float VolatileWord;
typedef uint2 DeviceWord;
#else
typedef unsigned long long VolatileWord;
typedef unsigned long long DeviceWord;
#endif
typedef float2 TextureWord;
};
// float4 specialization workaround (for SM10-SM13)
template <>
struct UnitWord <float4>
{
typedef int ShuffleWord;
#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130)
typedef float VolatileWord;
typedef uint4 DeviceWord;
#else
typedef unsigned long long VolatileWord;
typedef ulonglong2 DeviceWord;
#endif
typedef float4 TextureWord;
};
// char2 specialization workaround (for SM10-SM13)
template <>
struct UnitWord <char2>
{
typedef unsigned short ShuffleWord;
#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130)
typedef unsigned short VolatileWord;
typedef short DeviceWord;
#else
typedef unsigned short VolatileWord;
typedef unsigned short DeviceWord;
#endif
typedef unsigned short TextureWord;
};
template <typename T> struct UnitWord<volatile T> : UnitWord<T> {};
template <typename T> struct UnitWord<const T> : UnitWord<T> {};
template <typename T> struct UnitWord<const volatile T> : UnitWord<T> {};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Vector type inference utilities.
******************************************************************************/
/**
* \brief Exposes a member typedef \p Type that names the corresponding CUDA vector type if one exists. Otherwise \p Type refers to the CubVector structure itself, which will wrap the corresponding \p x, \p y, etc. vector fields.
*/
template <typename T, int vec_elements> struct CubVector;
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
enum
{
/// The maximum number of elements in CUDA vector types
MAX_VEC_ELEMENTS = 4,
};
/**
* Generic vector-1 type
*/
template <typename T>
struct CubVector<T, 1>
{
T x;
typedef T BaseType;
typedef CubVector<T, 1> Type;
};
/**
* Generic vector-2 type
*/
template <typename T>
struct CubVector<T, 2>
{
T x;
T y;
typedef T BaseType;
typedef CubVector<T, 2> Type;
};
/**
* Generic vector-3 type
*/
template <typename T>
struct CubVector<T, 3>
{
T x;
T y;
T z;
typedef T BaseType;
typedef CubVector<T, 3> Type;
};
/**
* Generic vector-4 type
*/
template <typename T>
struct CubVector<T, 4>
{
T x;
T y;
T z;
T w;
typedef T BaseType;
typedef CubVector<T, 4> Type;
};
/**
* Macro for expanding partially-specialized built-in vector types
*/
#define CUB_DEFINE_VECTOR_TYPE(base_type,short_type) \
\
template<> struct CubVector<base_type, 1> : short_type##1 \
{ \
typedef base_type BaseType; \
typedef short_type##1 Type; \
__host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
CubVector retval; \
retval.x = x + other.x; \
return retval; \
} \
__host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
CubVector retval; \
retval.x = x - other.x; \
return retval; \
} \
}; \
\
template<> struct CubVector<base_type, 2> : short_type##2 \
{ \
typedef base_type BaseType; \
typedef short_type##2 Type; \
__host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
CubVector retval; \
retval.x = x + other.x; \
retval.y = y + other.y; \
return retval; \
} \
__host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
CubVector retval; \
retval.x = x - other.x; \
retval.y = y - other.y; \
return retval; \
} \
}; \
\
template<> struct CubVector<base_type, 3> : short_type##3 \
{ \
typedef base_type BaseType; \
typedef short_type##3 Type; \
__host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
CubVector retval; \
retval.x = x + other.x; \
retval.y = y + other.y; \
retval.z = z + other.z; \
return retval; \
} \
__host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
CubVector retval; \
retval.x = x - other.x; \
retval.y = y - other.y; \
retval.z = z - other.z; \
return retval; \
} \
}; \
\
template<> struct CubVector<base_type, 4> : short_type##4 \
{ \
typedef base_type BaseType; \
typedef short_type##4 Type; \
__host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
CubVector retval; \
retval.x = x + other.x; \
retval.y = y + other.y; \
retval.z = z + other.z; \
retval.w = w + other.w; \
return retval; \
} \
__host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
CubVector retval; \
retval.x = x - other.x; \
retval.y = y - other.y; \
retval.z = z - other.z; \
retval.w = w - other.w; \
return retval; \
} \
};
// Expand CUDA vector types for built-in primitives
CUB_DEFINE_VECTOR_TYPE(char, char)
CUB_DEFINE_VECTOR_TYPE(signed char, char)
CUB_DEFINE_VECTOR_TYPE(short, short)
CUB_DEFINE_VECTOR_TYPE(int, int)
CUB_DEFINE_VECTOR_TYPE(long, long)
CUB_DEFINE_VECTOR_TYPE(long long, longlong)
CUB_DEFINE_VECTOR_TYPE(unsigned char, uchar)
CUB_DEFINE_VECTOR_TYPE(unsigned short, ushort)
CUB_DEFINE_VECTOR_TYPE(unsigned int, uint)
CUB_DEFINE_VECTOR_TYPE(unsigned long, ulong)
CUB_DEFINE_VECTOR_TYPE(unsigned long long, ulonglong)
CUB_DEFINE_VECTOR_TYPE(float, float)
CUB_DEFINE_VECTOR_TYPE(double, double)
CUB_DEFINE_VECTOR_TYPE(bool, uchar)
// Undefine macros
#undef CUB_DEFINE_VECTOR_TYPE
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Wrapper types
******************************************************************************/
/**
* \brief A storage-backing wrapper that allows types with non-trivial constructors to be aliased in unions
*/
template <typename T>
struct Uninitialized
{
/// Biggest memory-access word that T is a whole multiple of and is not larger than the alignment of T
typedef typename UnitWord<T>::DeviceWord DeviceWord;
enum
{
WORDS = sizeof(T) / sizeof(DeviceWord)
};
/// Backing storage
DeviceWord storage[WORDS];
/// Alias
__host__ __device__ __forceinline__ T& Alias()
{
return reinterpret_cast<T&>(*this);
}
};
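// Illustrative usage sketch (assumption, not from the original source):
// Uninitialized is typically used to give shared-memory backing to a type
// with a non-trivial constructor, e.g.:
//
//   __shared__ Uninitialized<SomeTempStorage> smem; // SomeTempStorage is hypothetical
//   SomeTempStorage &storage = smem.Alias();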
/**
* \brief A key identifier paired with a corresponding value
*/
template <
typename _Key,
typename _Value
#if defined(_WIN32) && !defined(_WIN64)
, bool KeyIsLT = (AlignBytes<_Key>::ALIGN_BYTES < AlignBytes<_Value>::ALIGN_BYTES)
, bool ValIsLT = (AlignBytes<_Value>::ALIGN_BYTES < AlignBytes<_Key>::ALIGN_BYTES)
#endif // #if defined(_WIN32) && !defined(_WIN64)
>
struct KeyValuePair
{
typedef _Key Key; ///< Key data type
typedef _Value Value; ///< Value data type
Key key; ///< Item key
Value value; ///< Item value
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair() {}
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {}
/// Inequality operator
__host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b)
{
return (value != b.value) || (key != b.key);
}
};
#if defined(_WIN32) && !defined(_WIN64)
/**
* Win32 won't do 16B alignment. This can present two problems for
 * should-be-16B-aligned (but actually 8B-aligned) built-in and intrinsic members:
 * 1) If a smaller-aligned item were to be listed first, the host compiler places the
 * should-be-16B item at too early an offset (and disagrees with the device compiler)
 * 2) Or, if the smaller-aligned item is listed second, the host compiler gets the size
 * of the struct wrong (and disagrees with the device compiler)
*
* So we put the larger-should-be-aligned item first, and explicitly pad the
* end of the struct
*/
/// Smaller key specialization
template <typename K, typename V>
struct KeyValuePair<K, V, true, false>
{
typedef K Key;
typedef V Value;
typedef char Pad[AlignBytes<V>::ALIGN_BYTES - AlignBytes<K>::ALIGN_BYTES];
Value value; // Value has larger would-be alignment and goes first
Key key;
Pad pad;
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair() {}
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {}
/// Inequality operator
__host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b)
{
return (value != b.value) || (key != b.key);
}
};
/// Smaller value specialization
template <typename K, typename V>
struct KeyValuePair<K, V, false, true>
{
typedef K Key;
typedef V Value;
typedef char Pad[AlignBytes<K>::ALIGN_BYTES - AlignBytes<V>::ALIGN_BYTES];
Key key; // Key has larger would-be alignment and goes first
Value value;
Pad pad;
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair() {}
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {}
/// Inequality operator
__host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b)
{
return (value != b.value) || (key != b.key);
}
};
#endif // #if defined(_WIN32) && !defined(_WIN64)
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* \brief A wrapper for passing simple static arrays as kernel parameters
*/
template <typename T, int COUNT>
struct ArrayWrapper
{
/// Statically-sized array of type \p T
T array[COUNT];
/// Constructor
__host__ __device__ __forceinline__ ArrayWrapper() {}
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Double-buffer storage wrapper for multi-pass stream transformations that require more than one storage array for streaming intermediate results back and forth.
*
* Many multi-pass computations require a pair of "ping-pong" storage
* buffers (e.g., one for reading from and the other for writing to, and then
* vice-versa for the subsequent pass). This structure wraps a set of device
* buffers and a "selector" member to track which is "current".
*/
template <typename T>
struct DoubleBuffer
{
/// Pair of device buffer pointers
T *d_buffers[2];
/// Selector into \p d_buffers (i.e., the active/valid buffer)
int selector;
/// \brief Constructor
__host__ __device__ __forceinline__ DoubleBuffer()
{
selector = 0;
d_buffers[0] = NULL;
d_buffers[1] = NULL;
}
/// \brief Constructor
__host__ __device__ __forceinline__ DoubleBuffer(
T *d_current, ///< The currently valid buffer
T *d_alternate) ///< Alternate storage buffer of the same size as \p d_current
{
selector = 0;
d_buffers[0] = d_current;
d_buffers[1] = d_alternate;
}
/// \brief Return pointer to the currently valid buffer
__host__ __device__ __forceinline__ T* Current() { return d_buffers[selector]; }
/// \brief Return pointer to the currently invalid buffer
__host__ __device__ __forceinline__ T* Alternate() { return d_buffers[selector ^ 1]; }
};
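// Illustrative usage sketch (assumption, not from the original source):
//
//   int *d_in, *d_tmp; // device buffers prepared by the caller
//   DoubleBuffer<int> d_keys(d_in, d_tmp);
//   // ... a multi-pass primitive may flip d_keys.selector between passes ...
//   int *d_result = d_keys.Current(); // the valid buffer after the final pass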
/******************************************************************************
* Typedef-detection
******************************************************************************/
/**
* \brief Defines a structure \p detector_name that is templated on type \p T. The \p detector_name struct exposes a constant member \p VALUE indicating whether or not parameter \p T exposes a nested type \p nested_type_name
*/
#define CUB_DEFINE_DETECT_NESTED_TYPE(detector_name, nested_type_name) \
template <typename T> \
struct detector_name \
{ \
template <typename C> \
static char& test(typename C::nested_type_name*); \
template <typename> \
static int& test(...); \
enum \
{ \
VALUE = sizeof(test<T>(0)) < sizeof(int) \
}; \
};
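/*
 * Usage sketch (HasValueType / value_type are illustrative names):
 *
 *     CUB_DEFINE_DETECT_NESTED_TYPE(HasValueType, value_type)
 *
 *     struct A { typedef int value_type; };
 *     struct B {};
 *
 *     // HasValueType<A>::VALUE == 1   (A exposes a nested value_type)
 *     // HasValueType<B>::VALUE == 0   (B does not)
 */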
/******************************************************************************
* Simple enable-if (similar to Boost)
******************************************************************************/
/**
* \brief Simple enable-if (similar to Boost)
*/
template <bool Condition, class T = void>
struct EnableIf
{
/// Enable-if type for SFINAE dummy variables
typedef T Type;
};
template <class T>
struct EnableIf<false, T> {};
/******************************************************************************
 * Functor-signature detection
******************************************************************************/
/**
 * \brief Determine whether or not BinaryOp's functor is of the form <tt>bool operator()(const T& a, const T& b)</tt> or <tt>bool operator()(const T& a, const T& b, int idx)</tt>
*/
template <typename T, typename BinaryOp>
struct BinaryOpHasIdxParam
{
private:
/*
template <typename BinaryOpT, bool (BinaryOpT::*)(const T &a, const T &b, unsigned int idx) const> struct SFINAE1 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(const T &a, const T &b, unsigned int idx)> struct SFINAE2 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(T a, T b, unsigned int idx) const> struct SFINAE3 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(T a, T b, unsigned int idx)> struct SFINAE4 {};
*/
template <typename BinaryOpT, bool (BinaryOpT::*)(const T &a, const T &b, int idx) const> struct SFINAE5 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(const T &a, const T &b, int idx)> struct SFINAE6 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(T a, T b, int idx) const> struct SFINAE7 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(T a, T b, int idx)> struct SFINAE8 {};
/*
template <typename BinaryOpT> static char Test(SFINAE1<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE2<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE3<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE4<BinaryOpT, &BinaryOpT::operator()> *);
*/
template <typename BinaryOpT> static char Test(SFINAE5<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE6<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE7<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE8<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static int Test(...);
public:
    /// Whether the functor BinaryOp has a third <tt>int</tt> index param
static const bool HAS_PARAM = sizeof(Test<BinaryOp>(NULL)) == sizeof(char);
};
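/*
 * Usage sketch (IdxMin is an illustrative name): a functor whose call
 * operator carries the third index parameter that this trait detects:
 *
 *     struct IdxMin
 *     {
 *         bool operator()(const float &a, const float &b, int idx) const;
 *     };
 *
 *     // BinaryOpHasIdxParam<float, IdxMin>::HAS_PARAM == true
 */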
/******************************************************************************
* Simple type traits utilities.
*
* For example:
* Traits<int>::CATEGORY // SIGNED_INTEGER
* Traits<NullType>::NULL_TYPE // true
* Traits<uint4>::CATEGORY // NOT_A_NUMBER
 *     Traits<uint4>::PRIMITIVE     // false
*
******************************************************************************/
/**
* \brief Basic type traits categories
*/
enum Category
{
NOT_A_NUMBER,
SIGNED_INTEGER,
UNSIGNED_INTEGER,
FLOATING_POINT
};
/**
* \brief Basic type traits
*/
template <Category _CATEGORY, bool _PRIMITIVE, bool _NULL_TYPE, typename _UnsignedBits, typename T>
struct BaseTraits
{
/// Category
static const Category CATEGORY = _CATEGORY;
enum
{
PRIMITIVE = _PRIMITIVE,
NULL_TYPE = _NULL_TYPE,
};
};
/**
* Basic type traits (unsigned primitive specialization)
*/
template <typename _UnsignedBits, typename T>
struct BaseTraits<UNSIGNED_INTEGER, true, false, _UnsignedBits, T>
{
typedef _UnsignedBits UnsignedBits;
static const Category CATEGORY = UNSIGNED_INTEGER;
static const UnsignedBits LOWEST_KEY = UnsignedBits(0);
static const UnsignedBits MAX_KEY = UnsignedBits(-1);
enum
{
PRIMITIVE = true,
NULL_TYPE = false,
};
static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key)
{
return key;
}
static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key)
{
return key;
}
static __host__ __device__ __forceinline__ T Max()
{
UnsignedBits retval = MAX_KEY;
return reinterpret_cast<T&>(retval);
}
static __host__ __device__ __forceinline__ T Lowest()
{
UnsignedBits retval = LOWEST_KEY;
return reinterpret_cast<T&>(retval);
}
};
/**
* Basic type traits (signed primitive specialization)
*/
template <typename _UnsignedBits, typename T>
struct BaseTraits<SIGNED_INTEGER, true, false, _UnsignedBits, T>
{
typedef _UnsignedBits UnsignedBits;
static const Category CATEGORY = SIGNED_INTEGER;
static const UnsignedBits HIGH_BIT = UnsignedBits(1) << ((sizeof(UnsignedBits) * 8) - 1);
static const UnsignedBits LOWEST_KEY = HIGH_BIT;
static const UnsignedBits MAX_KEY = UnsignedBits(-1) ^ HIGH_BIT;
enum
{
PRIMITIVE = true,
NULL_TYPE = false,
};
static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key)
{
return key ^ HIGH_BIT;
};
static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key)
{
return key ^ HIGH_BIT;
};
static __host__ __device__ __forceinline__ T Max()
{
UnsignedBits retval = MAX_KEY;
return reinterpret_cast<T&>(retval);
}
static __host__ __device__ __forceinline__ T Lowest()
{
UnsignedBits retval = LOWEST_KEY;
return reinterpret_cast<T&>(retval);
}
};
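/*
 * Note (sketch): XOR-ing the high bit maps two's-complement order onto
 * unsigned order, e.g. for 8-bit keys:
 *
 *     TwiddleIn(0x80)  // -128 -> 0x00 (smallest)
 *     TwiddleIn(0xFF)  //   -1 -> 0x7F
 *     TwiddleIn(0x00)  //    0 -> 0x80
 *     TwiddleIn(0x7F)  //  127 -> 0xFF (largest)
 *
 * so an unsigned radix sort of twiddled keys produces signed ordering, and
 * TwiddleOut (the same XOR) restores the original bits.
 */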
template <typename _T>
struct FpLimits;
template <>
struct FpLimits<float>
{
static __host__ __device__ __forceinline__ float Max() {
return FLT_MAX;
}
static __host__ __device__ __forceinline__ float Lowest() {
return FLT_MAX * float(-1);
}
};
template <>
struct FpLimits<double>
{
static __host__ __device__ __forceinline__ double Max() {
return DBL_MAX;
}
static __host__ __device__ __forceinline__ double Lowest() {
return DBL_MAX * double(-1);
}
};
template <typename _T>
struct TypeConst;
template <>
struct TypeConst<cuComplex>
{
static __host__ __device__ __forceinline__ cuComplex Zero()
{
return make_cuComplex(0.f, 0.f);
}
static __host__ __device__ __forceinline__ cuComplex One()
{
return make_cuComplex(1.f, 0.f);
}
};
template <>
struct TypeConst<cuDoubleComplex>
{
static __host__ __device__ __forceinline__ cuDoubleComplex Zero()
{
        return make_cuDoubleComplex(0.0, 0.0);
    }
    static __host__ __device__ __forceinline__ cuDoubleComplex One()
    {
        return make_cuDoubleComplex(1.0, 0.0);
}
};
template <typename _T>
struct TypeConst
{
static __host__ __device__ __forceinline__ _T Zero()
{
return _T(0);
}
static __host__ __device__ __forceinline__ _T One()
{
return _T(1);
}
};
/**
* Basic type traits (fp primitive specialization)
*/
template <typename _UnsignedBits, typename T>
struct BaseTraits<FLOATING_POINT, true, false, _UnsignedBits, T>
{
typedef _UnsignedBits UnsignedBits;
static const Category CATEGORY = FLOATING_POINT;
static const UnsignedBits HIGH_BIT = UnsignedBits(1) << ((sizeof(UnsignedBits) * 8) - 1);
static const UnsignedBits LOWEST_KEY = UnsignedBits(-1);
static const UnsignedBits MAX_KEY = UnsignedBits(-1) ^ HIGH_BIT;
enum
{
PRIMITIVE = true,
NULL_TYPE = false,
};
static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key)
{
UnsignedBits mask = (key & HIGH_BIT) ? UnsignedBits(-1) : HIGH_BIT;
return key ^ mask;
};
static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key)
{
UnsignedBits mask = (key & HIGH_BIT) ? HIGH_BIT : UnsignedBits(-1);
return key ^ mask;
};
static __host__ __device__ __forceinline__ T Max() {
return FpLimits<T>::Max();
}
static __host__ __device__ __forceinline__ T Lowest() {
return FpLimits<T>::Lowest();
}
};
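/*
 * Note (sketch): for IEEE-754 keys, TwiddleIn flips every bit of negative
 * values (whose raw bit patterns compare in reverse order) and only the sign
 * bit of non-negative values, mapping floating-point order onto unsigned
 * integer order; TwiddleOut applies the inverse mask to recover the bits.
 */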
/**
* Basic type traits (fp complex primitive specialization)
*/
template <typename Unused, typename T>
struct BaseTraits<FLOATING_POINT, false, false, Unused, T>
{
typedef Unused UnsignedBits;
static const Category CATEGORY = FLOATING_POINT;
enum
{
PRIMITIVE = false,
NULL_TYPE = false,
};
};
/**
* \brief Numeric type traits
*/
template <typename T> struct NumericTraits : BaseTraits<NOT_A_NUMBER, false, false, T, T> {};
template <> struct NumericTraits<NullType> : BaseTraits<NOT_A_NUMBER, false, true, NullType, NullType> {};
template <> struct NumericTraits<char> : BaseTraits<(std::numeric_limits<char>::is_signed) ? SIGNED_INTEGER : UNSIGNED_INTEGER, true, false, unsigned char, char> {};
template <> struct NumericTraits<signed char> : BaseTraits<SIGNED_INTEGER, true, false, unsigned char, signed char> {};
template <> struct NumericTraits<short> : BaseTraits<SIGNED_INTEGER, true, false, unsigned short, short> {};
template <> struct NumericTraits<int> : BaseTraits<SIGNED_INTEGER, true, false, unsigned int, int> {};
template <> struct NumericTraits<long> : BaseTraits<SIGNED_INTEGER, true, false, unsigned long, long> {};
template <> struct NumericTraits<long long> : BaseTraits<SIGNED_INTEGER, true, false, unsigned long long, long long> {};
template <> struct NumericTraits<unsigned char> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned char, unsigned char> {};
template <> struct NumericTraits<unsigned short> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned short, unsigned short> {};
template <> struct NumericTraits<unsigned int> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned int, unsigned int> {};
template <> struct NumericTraits<unsigned long> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned long, unsigned long> {};
template <> struct NumericTraits<unsigned long long> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned long long, unsigned long long> {};
template <> struct NumericTraits<float> : BaseTraits<FLOATING_POINT, true, false, unsigned int, float> {};
template <> struct NumericTraits<double> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, double> {};
template <> struct NumericTraits<cuComplex> : BaseTraits<FLOATING_POINT, false, false, void, cuComplex> {};
template <> struct NumericTraits<cuDoubleComplex> : BaseTraits<FLOATING_POINT, false, false, void, cuDoubleComplex> {};
template <> struct NumericTraits<bool> : BaseTraits<UNSIGNED_INTEGER, true, false, typename UnitWord<bool>::VolatileWord, bool> {};
/**
* \brief Type traits
*/
template <typename T>
struct Traits : NumericTraits<typename RemoveQualifiers<T>::Type> {};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Semiring util
*/
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//@TODO: reuse cub
/*template <typename T>
struct type_info;
template <>
struct type_info<double>
{
static __host__ __device__ __forceinline__ double inf() { return DBL_MAX;}
static __host__ __device__ __forceinline__ double ninf() { return -DBL_MAX;}
// this is what we use as a tolerance in the algorithms, more precision than this is useless for CPU reference comparison
static __host__ __device__ __forceinline__ double tol() { return 1e-6; }
};
template <>
struct type_info<float>
{
static __host__ __device__ __forceinline__ float inf() {return FLT_MAX;}
static __host__ __device__ __forceinline__ float ninf() {return -FLT_MAX;}
static __host__ __device__ __forceinline__ float tol() {return 1e-4;}
};
template <>
struct type_info<int>
{
static __host__ __device__ __forceinline__ int inf() {return INT_MAX;}
static __host__ __device__ __forceinline__ int ninf() {return INT_MIN;}
static __host__ __device__ __forceinline__ int tol() {return 0;}
};*/
template<typename V>
struct PlusTimesSemiring
{
// enable with c++11
/*
static_assert( std::is_same<float, typename std::remove_cv<V>::type>::value ||
std::is_same<double, typename std::remove_cv<T>::type>::value,
"Graph value type is not supported by this semiring");
*/
static __host__ __device__ __forceinline__ V plus_ident(){ return TypeConst<V>::Zero();}
static __host__ __device__ __forceinline__ V times_ident(){ return TypeConst<V>::One();}
static __host__ __device__ __forceinline__ V times_null(){ return TypeConst<V>::Zero();}
static __host__ __device__ __forceinline__ V plus(const V &arg0, const V &arg1)
{
return arg0 + arg1;
}
static __host__ __device__ __forceinline__ V times(const V &arg0, const V &arg1)
{
return arg0 * arg1;
}
// used in external algs
struct SumOp
{
        /// Semiring sum operator, returns <tt>plus(a, b)</tt>
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return plus(a, b);
}
};
enum{
HAS_PLUS_ATOMICS = 1, // for cub fixup path deduction
};
};
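/*
 * Usage sketch (dot_step is an illustrative name): algorithms can be written
 * once against this interface; the semiring chosen selects the algebra. With
 * PlusTimesSemiring the step below is an ordinary multiply-accumulate, while
 * with MinPlusSemiring (defined next) it becomes a shortest-path relaxation:
 *
 *     template <typename SR, typename V>
 *     __host__ __device__ __forceinline__ V dot_step(V acc, V a, V x)
 *     {
 *         return SR::plus(acc, SR::times(a, x));
 *     }
 */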
template<typename V>
struct MinPlusSemiring
{
// enable with c++11
/*
static_assert( std::is_same<float, typename std::remove_cv<V>::type>::value ||
std::is_same<double, typename std::remove_cv<T>::type>::value,
"Graph value type is not supported by this semiring");
*/
static __host__ __device__ __forceinline__ V plus_ident(){ return FpLimits<V>::Max();}
static __host__ __device__ __forceinline__ V times_ident(){ return TypeConst<V>::Zero();}
static __host__ __device__ __forceinline__ V times_null(){ return FpLimits<V>::Max();}
static __host__ __device__ __forceinline__ V plus(const V &arg0, const V &arg1)
{
return CUB_MIN(arg0, arg1);
}
static __host__ __device__ __forceinline__ V times(const V &arg0, const V &arg1)
{
return arg0 + arg1;
}
// used in external algs
struct SumOp
{
        /// Semiring sum operator, returns <tt>plus(a, b)</tt>
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return plus(a, b);
}
};
enum{
HAS_PLUS_ATOMICS = 0, // for cub fixup path deduction
};
};
template<typename V>
struct MaxMinSemiring
{
// enable with c++11
/*
static_assert( std::is_same<float, typename std::remove_cv<V>::type>::value ||
std::is_same<double, typename std::remove_cv<T>::type>::value,
"Graph value type is not supported by this semiring");
*/
static __host__ __device__ __forceinline__ V plus_ident(){ return FpLimits<V>::Lowest();}
static __host__ __device__ __forceinline__ V times_ident(){ return FpLimits<V>::Max();}
static __host__ __device__ __forceinline__ V times_null(){ return FpLimits<V>::Lowest();}
static __host__ __device__ __forceinline__ V plus(const V &arg0, const V &arg1)
{
return CUB_MAX(arg0, arg1);
}
static __host__ __device__ __forceinline__ V times(const V &arg0, const V &arg1)
{
return CUB_MIN(arg0, arg1);
}
// used in external algs
struct SumOp
{
        /// Semiring sum operator, returns <tt>plus(a, b)</tt>
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return plus(a, b);
}
};
enum{
HAS_PLUS_ATOMICS = 0, // for cub fixup path deduction
};
};
template<typename V>
struct OrAndBoolSemiring
{
// enable with c++11
/*
static_assert( std::is_same<float, typename std::remove_cv<V>::type>::value ||
std::is_same<double, typename std::remove_cv<T>::type>::value,
"Graph value type is not supported by this semiring");
*/
static __host__ __device__ __forceinline__ V plus_ident(){ return TypeConst<V>::Zero();}
static __host__ __device__ __forceinline__ V times_ident(){ return TypeConst<V>::One();}
static __host__ __device__ __forceinline__ V times_null(){ return TypeConst<V>::Zero();}
static __host__ __device__ __forceinline__ V plus(const V &arg0, const V &arg1)
{
return (bool) arg0 | (bool) arg1;
}
static __host__ __device__ __forceinline__ V times(const V &arg0, const V &arg1)
{
return (bool) arg0 & (bool) arg1;
}
// used in external algs
struct SumOp
{
        /// Semiring (boolean OR) sum operator, returns <tt>plus(a, b)</tt>
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return plus(a, b);
}
};
enum{
HAS_PLUS_ATOMICS = 0, // for cub fixup path deduction
};
};
template<typename V>
struct LogPlusSemiring
{
// enable with c++11
/*
static_assert( std::is_same<float, typename std::remove_cv<V>::type>::value ||
std::is_same<double, typename std::remove_cv<T>::type>::value,
"Graph value type is not supported by this semiring");
*/
static __host__ __device__ __forceinline__ V plus_ident(){ return FpLimits<V>::Max();}
static __host__ __device__ __forceinline__ V times_ident(){ return TypeConst<V>::Zero();}
static __host__ __device__ __forceinline__ V times_null(){ return FpLimits<V>::Max();}
static __host__ __device__ __forceinline__ V plus(const V &arg0, const V &arg1)
{
return -log(exp(-arg0) + exp(-arg1));
}
static __host__ __device__ __forceinline__ V times(const V &arg0, const V &arg1)
{
return arg0 + arg1;
}
// used in external algs
struct SumOp
{
        /// Semiring sum operator, returns <tt>plus(a, b)</tt>
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return plus(a, b);
}
};
enum{
HAS_PLUS_ATOMICS = 0, // for cub fixup path deduction
};
};
// used in external algs
template <typename SR>
struct SumOp
{
    /// Semiring sum operator, returns <tt>SR::plus(a, b)</tt>
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return SR::plus(a, b);
}
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/** @} */ // end group UtilModule
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external | rapidsai_public_repos/nvgraph/external/cub_semiring/util_allocator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple caching allocator for device memory allocations. The allocator is
* thread-safe and capable of managing device allocations on multiple devices.
******************************************************************************/
#pragma once
#include "util_namespace.cuh"
#include "util_debug.cuh"
#include <set>
#include <map>
#include "host/mutex.cuh"
#include <math.h>
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilMgmt
* @{
*/
/******************************************************************************
* CachingDeviceAllocator (host use)
******************************************************************************/
/**
* \brief A simple caching allocator for device memory allocations.
*
* \par Overview
* The allocator is thread-safe and stream-safe and is capable of managing cached
* device allocations on multiple devices. It behaves as follows:
*
* \par
* - Allocations from the allocator are associated with an \p active_stream. Once freed,
* the allocation becomes available immediately for reuse within the \p active_stream
 *   with which it was associated during allocation, and it becomes available for
* reuse within other streams when all prior work submitted to \p active_stream has completed.
* - Allocations are categorized and cached by bin size. A new allocation request of
* a given size will only consider cached allocations within the corresponding bin.
* - Bin limits progress geometrically in accordance with the growth factor
* \p bin_growth provided during construction. Unused device allocations within
* a larger bin cache are not reused for allocation requests that categorize to
* smaller bin sizes.
* - Allocation requests below (\p bin_growth ^ \p min_bin) are rounded up to
* (\p bin_growth ^ \p min_bin).
* - Allocations above (\p bin_growth ^ \p max_bin) are not rounded up to the nearest
* bin and are simply freed when they are deallocated instead of being returned
* to a bin-cache.
* - %If the total storage of cached allocations on a given device will exceed
* \p max_cached_bytes, allocations for that device are simply freed when they are
* deallocated instead of being returned to their bin-cache.
*
* \par
* For example, the default-constructed CachingDeviceAllocator is configured with:
* - \p bin_growth = 8
* - \p min_bin = 3
* - \p max_bin = 7
* - \p max_cached_bytes = 6MB - 1B
*
* \par
* which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB
* and sets a maximum of 6,291,455 cached bytes per device
*
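 * \par Snippet
 * A minimal host-side usage sketch (the \p d_scratch pointer and \p stream below are illustrative, not part of this header):
 * \code
 * cub::CachingDeviceAllocator allocator;                       // default configuration
 * void *d_scratch = NULL;
 * allocator.DeviceAllocate(&d_scratch, 1024 * 1024, stream);   // serviced from the bin cache when possible
 * // ... launch kernels on stream that read/write d_scratch ...
 * allocator.DeviceFree(d_scratch);                             // block is returned to the bin cache
 * \endcode
 *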
*/
struct CachingDeviceAllocator
{
//---------------------------------------------------------------------
// Constants
//---------------------------------------------------------------------
/// Out-of-bounds bin
static const unsigned int INVALID_BIN = (unsigned int) -1;
/// Invalid size
static const size_t INVALID_SIZE = (size_t) -1;
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/// Invalid device ordinal
static const int INVALID_DEVICE_ORDINAL = -1;
//---------------------------------------------------------------------
// Type definitions and helper types
//---------------------------------------------------------------------
/**
* Descriptor for device memory allocations
*/
struct BlockDescriptor
{
void* d_ptr; // Device pointer
size_t bytes; // Size of allocation in bytes
unsigned int bin; // Bin enumeration
int device; // device ordinal
cudaStream_t associated_stream; // Associated associated_stream
cudaEvent_t ready_event; // Signal when associated stream has run to the point at which this block was freed
// Constructor (suitable for searching maps for a specific block, given its pointer and device)
BlockDescriptor(void *d_ptr, int device) :
d_ptr(d_ptr),
bytes(0),
bin(INVALID_BIN),
device(device),
associated_stream(0),
ready_event(0)
{}
// Constructor (suitable for searching maps for a range of suitable blocks, given a device)
BlockDescriptor(int device) :
d_ptr(NULL),
bytes(0),
bin(INVALID_BIN),
device(device),
associated_stream(0),
ready_event(0)
{}
// Comparison functor for comparing device pointers
static bool PtrCompare(const BlockDescriptor &a, const BlockDescriptor &b)
{
if (a.device == b.device)
return (a.d_ptr < b.d_ptr);
else
return (a.device < b.device);
}
// Comparison functor for comparing allocation sizes
static bool SizeCompare(const BlockDescriptor &a, const BlockDescriptor &b)
{
if (a.device == b.device)
return (a.bytes < b.bytes);
else
return (a.device < b.device);
}
};
/// BlockDescriptor comparator function interface
typedef bool (*Compare)(const BlockDescriptor &, const BlockDescriptor &);
class TotalBytes {
public:
size_t free;
size_t live;
TotalBytes() { free = live = 0; }
};
/// Set type for cached blocks (ordered by size)
typedef std::multiset<BlockDescriptor, Compare> CachedBlocks;
/// Set type for live blocks (ordered by ptr)
typedef std::multiset<BlockDescriptor, Compare> BusyBlocks;
/// Map type of device ordinals to the number of cached bytes cached by each device
typedef std::map<int, TotalBytes> GpuCachedBytes;
//---------------------------------------------------------------------
// Utility functions
//---------------------------------------------------------------------
/**
* Integer pow function for unsigned base and exponent
*/
static unsigned int IntPow(
unsigned int base,
unsigned int exp)
{
unsigned int retval = 1;
while (exp > 0)
{
if (exp & 1) {
retval = retval * base; // multiply the result by the current base
}
base = base * base; // square the base
exp = exp >> 1; // divide the exponent in half
}
return retval;
}
    /**
     * Round up to the nearest power of \p base, returning the exponent in \p power and the rounded size in \p rounded_bytes
     */
void NearestPowerOf(
unsigned int &power,
size_t &rounded_bytes,
unsigned int base,
size_t value)
{
power = 0;
rounded_bytes = 1;
if (value * base < value)
{
// Overflow
power = sizeof(size_t) * 8;
rounded_bytes = size_t(0) - 1;
return;
}
while (rounded_bytes < value)
{
rounded_bytes *= base;
power++;
}
}
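    /*
     * Worked example (sketch): with base = 8 and value = 1000, the loop exits
     * with power = 4 and rounded_bytes = 4096, since 8^3 = 512 < 1000 <= 8^4 = 4096.
     */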
//---------------------------------------------------------------------
// Fields
//---------------------------------------------------------------------
cub::Mutex mutex; /// Mutex for thread-safety
unsigned int bin_growth; /// Geometric growth factor for bin-sizes
unsigned int min_bin; /// Minimum bin enumeration
unsigned int max_bin; /// Maximum bin enumeration
size_t min_bin_bytes; /// Minimum bin size
size_t max_bin_bytes; /// Maximum bin size
size_t max_cached_bytes; /// Maximum aggregate cached bytes per device
const bool skip_cleanup; /// Whether or not to skip a call to FreeAllCached() when destructor is called. (The CUDA runtime may have already shut down for statically declared allocators)
bool debug; /// Whether or not to print (de)allocation events to stdout
GpuCachedBytes cached_bytes; /// Map of device ordinal to aggregate cached bytes on that device
CachedBlocks cached_blocks; /// Set of cached device allocations available for reuse
BusyBlocks live_blocks; /// Set of live device allocations currently in use
#endif // DOXYGEN_SHOULD_SKIP_THIS
//---------------------------------------------------------------------
// Methods
//---------------------------------------------------------------------
/**
* \brief Constructor.
*/
CachingDeviceAllocator(
unsigned int bin_growth, ///< Geometric growth factor for bin-sizes
unsigned int min_bin = 1, ///< Minimum bin (default is bin_growth ^ 1)
unsigned int max_bin = INVALID_BIN, ///< Maximum bin (default is no max bin)
size_t max_cached_bytes = INVALID_SIZE, ///< Maximum aggregate cached bytes per device (default is no limit)
bool skip_cleanup = false, ///< Whether or not to skip a call to \p FreeAllCached() when the destructor is called (default is to deallocate)
        bool            debug           = false)        ///< Whether or not to print (de)allocation events to stdout (default is no output)
:
bin_growth(bin_growth),
min_bin(min_bin),
max_bin(max_bin),
min_bin_bytes(IntPow(bin_growth, min_bin)),
max_bin_bytes(IntPow(bin_growth, max_bin)),
max_cached_bytes(max_cached_bytes),
skip_cleanup(skip_cleanup),
debug(debug),
cached_blocks(BlockDescriptor::SizeCompare),
live_blocks(BlockDescriptor::PtrCompare)
{}
/**
* \brief Default constructor.
*
* Configured with:
* \par
* - \p bin_growth = 8
* - \p min_bin = 3
* - \p max_bin = 7
 *   - \p max_cached_bytes = ((\p bin_growth ^ \p max_bin) * 3) - 1 = 6,291,455 bytes
*
* which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB and
* sets a maximum of 6,291,455 cached bytes per device
*/
CachingDeviceAllocator(
bool skip_cleanup = false,
bool debug = false)
:
bin_growth(8),
min_bin(3),
max_bin(7),
min_bin_bytes(IntPow(bin_growth, min_bin)),
max_bin_bytes(IntPow(bin_growth, max_bin)),
max_cached_bytes((max_bin_bytes * 3) - 1),
skip_cleanup(skip_cleanup),
debug(debug),
cached_blocks(BlockDescriptor::SizeCompare),
live_blocks(BlockDescriptor::PtrCompare)
{}
/**
 * \brief Sets the limit on the number of bytes this allocator is allowed to cache per device.
*
* Changing the ceiling of cached bytes does not cause any allocations (in-use or
* cached-in-reserve) to be freed. See \p FreeAllCached().
*/
cudaError_t SetMaxCachedBytes(
size_t max_cached_bytes)
{
// Lock
mutex.Lock();
if (debug) _CubLog("Changing max_cached_bytes (%lld -> %lld)\n", (long long) this->max_cached_bytes, (long long) max_cached_bytes);
this->max_cached_bytes = max_cached_bytes;
// Unlock
mutex.Unlock();
return cudaSuccess;
}
/**
* \brief Provides a suitable allocation of device memory for the given size on the specified device.
*
* Once freed, the allocation becomes available immediately for reuse within the \p active_stream
 * with which it was associated during allocation, and it becomes available for reuse within other
* streams when all prior work submitted to \p active_stream has completed.
*/
cudaError_t DeviceAllocate(
int device, ///< [in] Device on which to place the allocation
void **d_ptr, ///< [out] Reference to pointer to the allocation
size_t bytes, ///< [in] Minimum number of bytes for the allocation
cudaStream_t active_stream = 0) ///< [in] The stream to be associated with this allocation
{
*d_ptr = NULL;
int entrypoint_device = INVALID_DEVICE_ORDINAL;
cudaError_t error = cudaSuccess;
if (device == INVALID_DEVICE_ORDINAL)
{
if (CubDebug(error = cudaGetDevice(&entrypoint_device))) return error;
device = entrypoint_device;
}
// Create a block descriptor for the requested allocation
bool found = false;
BlockDescriptor search_key(device);
search_key.associated_stream = active_stream;
NearestPowerOf(search_key.bin, search_key.bytes, bin_growth, bytes);
if (search_key.bin > max_bin)
{
// Bin is greater than our maximum bin: allocate the request
// exactly and give out-of-bounds bin. It will not be cached
// for reuse when returned.
search_key.bin = INVALID_BIN;
search_key.bytes = bytes;
}
else
{
// Search for a suitable cached allocation: lock
mutex.Lock();
if (search_key.bin < min_bin)
{
// Bin is less than minimum bin: round up
search_key.bin = min_bin;
search_key.bytes = min_bin_bytes;
}
// Iterate through the range of cached blocks on the same device in the same bin
CachedBlocks::iterator block_itr = cached_blocks.lower_bound(search_key);
while ((block_itr != cached_blocks.end())
&& (block_itr->device == device)
&& (block_itr->bin == search_key.bin))
{
// To prevent races with reusing blocks returned by the host but still
// in use by the device, only consider cached blocks that are
// either (from the active stream) or (from an idle stream)
if ((active_stream == block_itr->associated_stream) ||
(cudaEventQuery(block_itr->ready_event) != cudaErrorNotReady))
{
// Reuse existing cache block. Insert into live blocks.
found = true;
search_key = *block_itr;
search_key.associated_stream = active_stream;
live_blocks.insert(search_key);
// Remove from free blocks
cached_bytes[device].free -= search_key.bytes;
cached_bytes[device].live += search_key.bytes;
if (debug) _CubLog("\tDevice %d reused cached block at %p (%lld bytes) for stream %lld (previously associated with stream %lld).\n",
device, search_key.d_ptr, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) block_itr->associated_stream);
cached_blocks.erase(block_itr);
break;
}
block_itr++;
}
// Done searching: unlock
mutex.Unlock();
}
// Allocate the block if necessary
if (!found)
{
// Set runtime's current device to specified device (entrypoint may not be set)
if (device != entrypoint_device)
{
if (CubDebug(error = cudaGetDevice(&entrypoint_device))) return error;
if (CubDebug(error = cudaSetDevice(device))) return error;
}
// Attempt to allocate
if (CubDebug(error = cudaMalloc(&search_key.d_ptr, search_key.bytes)) == cudaErrorMemoryAllocation)
{
// The allocation attempt failed: free all cached blocks on device and retry
if (debug) _CubLog("\tDevice %d failed to allocate %lld bytes for stream %lld, retrying after freeing cached allocations",
device, (long long) search_key.bytes, (long long) search_key.associated_stream);
error = cudaSuccess; // Reset the error we will return
cudaGetLastError(); // Reset CUDART's error
// Lock
mutex.Lock();
// Iterate the range of free blocks on the same device
BlockDescriptor free_key(device);
CachedBlocks::iterator block_itr = cached_blocks.lower_bound(free_key);
while ((block_itr != cached_blocks.end()) && (block_itr->device == device))
{
// No need to worry about synchronization with the device: cudaFree is
// blocking and will synchronize across all kernels executing
// on the current device
// Free device memory and destroy stream event.
if (CubDebug(error = cudaFree(block_itr->d_ptr))) break;
if (CubDebug(error = cudaEventDestroy(block_itr->ready_event))) break;
// Reduce balance and erase entry
cached_bytes[device].free -= block_itr->bytes;
if (debug) _CubLog("\tDevice %d freed %lld bytes.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n",
device, (long long) block_itr->bytes, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live);
                    cached_blocks.erase(block_itr++);   // post-increment: advance before erase invalidates the iterator
}
// Unlock
mutex.Unlock();
// Return under error
if (error) return error;
// Try to allocate again
if (CubDebug(error = cudaMalloc(&search_key.d_ptr, search_key.bytes))) return error;
}
// Create ready event
if (CubDebug(error = cudaEventCreateWithFlags(&search_key.ready_event, cudaEventDisableTiming)))
return error;
// Insert into live blocks
mutex.Lock();
live_blocks.insert(search_key);
cached_bytes[device].live += search_key.bytes;
mutex.Unlock();
if (debug) _CubLog("\tDevice %d allocated new device block at %p (%lld bytes associated with stream %lld).\n",
device, search_key.d_ptr, (long long) search_key.bytes, (long long) search_key.associated_stream);
// Attempt to revert back to previous device if necessary
if ((entrypoint_device != INVALID_DEVICE_ORDINAL) && (entrypoint_device != device))
{
if (CubDebug(error = cudaSetDevice(entrypoint_device))) return error;
}
}
// Copy device pointer to output parameter
*d_ptr = search_key.d_ptr;
if (debug) _CubLog("\t\t%lld available blocks cached (%lld bytes), %lld live blocks outstanding(%lld bytes).\n",
(long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live);
return error;
}
/**
* \brief Provides a suitable allocation of device memory for the given size on the current device.
*
* Once freed, the allocation becomes available immediately for reuse within the \p active_stream
 * with which it was associated during allocation, and it becomes available for reuse within other
* streams when all prior work submitted to \p active_stream has completed.
*/
cudaError_t DeviceAllocate(
void **d_ptr, ///< [out] Reference to pointer to the allocation
size_t bytes, ///< [in] Minimum number of bytes for the allocation
cudaStream_t active_stream = 0) ///< [in] The stream to be associated with this allocation
{
return DeviceAllocate(INVALID_DEVICE_ORDINAL, d_ptr, bytes, active_stream);
}
/**
* \brief Frees a live allocation of device memory on the specified device, returning it to the allocator.
*
* Once freed, the allocation becomes available immediately for reuse within the \p active_stream
 * with which it was associated during allocation, and it becomes available for reuse within other
* streams when all prior work submitted to \p active_stream has completed.
*/
cudaError_t DeviceFree(
int device,
void* d_ptr)
{
int entrypoint_device = INVALID_DEVICE_ORDINAL;
cudaError_t error = cudaSuccess;
if (device == INVALID_DEVICE_ORDINAL)
{
if (CubDebug(error = cudaGetDevice(&entrypoint_device)))
return error;
device = entrypoint_device;
}
// Lock
mutex.Lock();
// Find corresponding block descriptor
bool recached = false;
BlockDescriptor search_key(d_ptr, device);
BusyBlocks::iterator block_itr = live_blocks.find(search_key);
if (block_itr != live_blocks.end())
{
// Remove from live blocks
search_key = *block_itr;
live_blocks.erase(block_itr);
cached_bytes[device].live -= search_key.bytes;
// Keep the returned allocation if bin is valid and we won't exceed the max cached threshold
if ((search_key.bin != INVALID_BIN) && (cached_bytes[device].free + search_key.bytes <= max_cached_bytes))
{
// Insert returned allocation into free blocks
recached = true;
cached_blocks.insert(search_key);
cached_bytes[device].free += search_key.bytes;
if (debug) _CubLog("\tDevice %d returned %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks outstanding. (%lld bytes)\n",
device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(),
(long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live);
}
}
// Unlock
mutex.Unlock();
// First set to specified device (entrypoint may not be set)
if (device != entrypoint_device)
{
if (CubDebug(error = cudaGetDevice(&entrypoint_device))) return error;
if (CubDebug(error = cudaSetDevice(device))) return error;
}
if (recached)
{
// Insert the ready event in the associated stream (must have current device set properly)
if (CubDebug(error = cudaEventRecord(search_key.ready_event, search_key.associated_stream))) return error;
}
else
{
// Free the allocation from the runtime and cleanup the event.
if (CubDebug(error = cudaFree(d_ptr))) return error;
if (CubDebug(error = cudaEventDestroy(search_key.ready_event))) return error;
if (debug) _CubLog("\tDevice %d freed %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n",
device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live);
}
// Reset device
if ((entrypoint_device != INVALID_DEVICE_ORDINAL) && (entrypoint_device != device))
{
if (CubDebug(error = cudaSetDevice(entrypoint_device))) return error;
}
return error;
}
/**
* \brief Frees a live allocation of device memory on the current device, returning it to the allocator.
*
* Once freed, the allocation becomes available immediately for reuse within the \p active_stream
 * with which it was associated during allocation, and it becomes available for reuse within other
* streams when all prior work submitted to \p active_stream has completed.
*/
cudaError_t DeviceFree(
void* d_ptr)
{
return DeviceFree(INVALID_DEVICE_ORDINAL, d_ptr);
}
/**
* \brief Frees all cached device allocations on all devices
*/
cudaError_t FreeAllCached()
{
cudaError_t error = cudaSuccess;
int entrypoint_device = INVALID_DEVICE_ORDINAL;
int current_device = INVALID_DEVICE_ORDINAL;
mutex.Lock();
while (!cached_blocks.empty())
{
// Get first block
CachedBlocks::iterator begin = cached_blocks.begin();
// Get entry-point device ordinal if necessary
if (entrypoint_device == INVALID_DEVICE_ORDINAL)
{
if (CubDebug(error = cudaGetDevice(&entrypoint_device))) break;
}
// Set current device ordinal if necessary
if (begin->device != current_device)
{
if (CubDebug(error = cudaSetDevice(begin->device))) break;
current_device = begin->device;
}
// Free device memory
if (CubDebug(error = cudaFree(begin->d_ptr))) break;
if (CubDebug(error = cudaEventDestroy(begin->ready_event))) break;
// Reduce balance and erase entry
cached_bytes[current_device].free -= begin->bytes;
if (debug) _CubLog("\tDevice %d freed %lld bytes.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n",
current_device, (long long) begin->bytes, (long long) cached_blocks.size(), (long long) cached_bytes[current_device].free, (long long) live_blocks.size(), (long long) cached_bytes[current_device].live);
cached_blocks.erase(begin);
}
mutex.Unlock();
// Attempt to revert back to entry-point device if necessary
if (entrypoint_device != INVALID_DEVICE_ORDINAL)
{
if (CubDebug(error = cudaSetDevice(entrypoint_device))) return error;
}
return error;
}
/**
* \brief Destructor
*/
virtual ~CachingDeviceAllocator()
{
if (!skip_cleanup)
FreeAllCached();
}
};
/** @} */ // end group UtilMgmt
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external | rapidsai_public_repos/nvgraph/external/cub_semiring/util_device.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Properties of a given CUDA device and the corresponding PTX bundle
*/
#pragma once
#include "util_type.cuh"
#include "util_arch.cuh"
#include "util_debug.cuh"
#include "util_namespace.cuh"
#include "util_macro.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilMgmt
* @{
*/
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* Alias temporaries to externally-allocated device storage (or simply return the amount of storage needed).
*/
template <int ALLOCATIONS>
__host__ __device__ __forceinline__
cudaError_t AliasTemporaries(
void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Size in bytes of \t d_temp_storage allocation
void* (&allocations)[ALLOCATIONS], ///< [in,out] Pointers to device allocations needed
size_t (&allocation_sizes)[ALLOCATIONS]) ///< [in] Sizes in bytes of device allocations needed
{
const int ALIGN_BYTES = 256;
const int ALIGN_MASK = ~(ALIGN_BYTES - 1);
// Compute exclusive prefix sum over allocation requests
size_t allocation_offsets[ALLOCATIONS];
size_t bytes_needed = 0;
for (int i = 0; i < ALLOCATIONS; ++i)
{
size_t allocation_bytes = (allocation_sizes[i] + ALIGN_BYTES - 1) & ALIGN_MASK;
allocation_offsets[i] = bytes_needed;
bytes_needed += allocation_bytes;
}
bytes_needed += ALIGN_BYTES - 1;
// Check if the caller is simply requesting the size of the storage allocation
if (!d_temp_storage)
{
temp_storage_bytes = bytes_needed;
return cudaSuccess;
}
// Check if enough storage provided
if (temp_storage_bytes < bytes_needed)
{
return CubDebug(cudaErrorInvalidValue);
}
// Alias
d_temp_storage = (void *) ((size_t(d_temp_storage) + ALIGN_BYTES - 1) & ALIGN_MASK);
for (int i = 0; i < ALLOCATIONS; ++i)
{
allocations[i] = static_cast<char*>(d_temp_storage) + allocation_offsets[i];
}
return cudaSuccess;
}
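/*
 * Usage sketch (bytes_a / bytes_b are illustrative): the canonical two-phase
 * pattern sizes the allocation first, then carves it into aligned pieces:
 *
 *     void  *allocations[2];
 *     size_t allocation_sizes[2] = {bytes_a, bytes_b};
 *     size_t temp_storage_bytes  = 0;
 *     AliasTemporaries<2>(NULL, temp_storage_bytes, allocations, allocation_sizes);   // query size
 *     cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *     AliasTemporaries<2>(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes);
 *     // allocations[0] and allocations[1] now point into d_temp_storage
 */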
/**
* Empty kernel for querying PTX manifest metadata (e.g., version) for the current device
*/
template <typename T>
__global__ void EmptyKernel(void) { }
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Retrieves the PTX version that will be used on the current device (major * 100 + minor * 10)
*/
CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t PtxVersion(int &ptx_version)
{
struct Dummy
{
/// Type definition of the EmptyKernel kernel entry point
typedef void (*EmptyKernelPtr)();
/// Force EmptyKernel<void> to be generated if this class is used
CUB_RUNTIME_FUNCTION __forceinline__
EmptyKernelPtr Empty()
{
return EmptyKernel<void>;
}
};
#ifndef CUB_RUNTIME_ENABLED
(void)ptx_version;
// CUDA API calls not supported from this device
return cudaErrorInvalidConfiguration;
#elif (CUB_PTX_ARCH > 0)
ptx_version = CUB_PTX_ARCH;
return cudaSuccess;
#else
cudaError_t error = cudaSuccess;
do
{
cudaFuncAttributes empty_kernel_attrs;
if (CubDebug(error = cudaFuncGetAttributes(&empty_kernel_attrs, EmptyKernel<void>))) break;
ptx_version = empty_kernel_attrs.ptxVersion * 10;
}
while (0);
return error;
#endif
}
/**
* \brief Retrieves the SM version (major * 100 + minor * 10)
*/
CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t SmVersion(int &sm_version, int device_ordinal)
{
#ifndef CUB_RUNTIME_ENABLED
(void)sm_version;
(void)device_ordinal;
// CUDA API calls not supported from this device
return cudaErrorInvalidConfiguration;
#else
cudaError_t error = cudaSuccess;
do
{
// Fill in SM version
int major, minor;
if (CubDebug(error = cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, device_ordinal))) break;
if (CubDebug(error = cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, device_ordinal))) break;
sm_version = major * 100 + minor * 10;
}
while (0);
return error;
#endif
}
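/*
 * Usage sketch: host-side dispatch typically queries both versions before
 * choosing a tuning policy:
 *
 *     int ptx_version, sm_version;
 *     cudaError_t error;
 *     if (CubDebug(error = PtxVersion(ptx_version))) return error;
 *     if (CubDebug(error = SmVersion(sm_version, device_ordinal))) return error;
 */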
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* Synchronize the stream if specified
*/
CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t SyncStream(cudaStream_t stream)
{
#if (CUB_PTX_ARCH == 0)
return cudaStreamSynchronize(stream);
#else
(void)stream;
// Device can't yet sync on a specific stream
return cudaDeviceSynchronize();
#endif
}
/**
* \brief Computes maximum SM occupancy in thread blocks for executing the given kernel function pointer \p kernel_ptr on the current device with \p block_threads per thread block.
*
* \par Snippet
* The code snippet below illustrates the use of the MaxSmOccupancy function.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/util_device.cuh>
*
* template <typename T>
* __global__ void ExampleKernel()
* {
* // Allocate shared memory for BlockScan
* __shared__ volatile T buffer[4096];
*
* ...
* }
*
* ...
*
* // Determine SM occupancy for ExampleKernel specialized for unsigned char
* int max_sm_occupancy;
* MaxSmOccupancy(max_sm_occupancy, ExampleKernel<unsigned char>, 64);
*
* // max_sm_occupancy <-- 4 on SM10
* // max_sm_occupancy <-- 8 on SM20
* // max_sm_occupancy <-- 12 on SM35
*
* \endcode
*
*/
template <typename KernelPtr>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t MaxSmOccupancy(
int &max_sm_occupancy, ///< [out] maximum number of thread blocks that can reside on a single SM
KernelPtr kernel_ptr, ///< [in] Kernel pointer for which to compute SM occupancy
int block_threads, ///< [in] Number of threads per thread block
int dynamic_smem_bytes = 0)
{
#ifndef CUB_RUNTIME_ENABLED
(void)dynamic_smem_bytes;
(void)block_threads;
(void)kernel_ptr;
(void)max_sm_occupancy;
// CUDA API calls not supported from this device
return CubDebug(cudaErrorInvalidConfiguration);
#else
return cudaOccupancyMaxActiveBlocksPerMultiprocessor (
&max_sm_occupancy,
kernel_ptr,
block_threads,
dynamic_smem_bytes);
#endif // CUB_RUNTIME_ENABLED
}
/******************************************************************************
* Policy management
******************************************************************************/
/**
* Kernel dispatch configuration
*/
struct KernelConfig
{
int block_threads;
int items_per_thread;
int tile_size;
int sm_occupancy;
CUB_RUNTIME_FUNCTION __forceinline__
KernelConfig() : block_threads(0), items_per_thread(0), tile_size(0), sm_occupancy(0) {}
template <typename AgentPolicyT, typename KernelPtrT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Init(KernelPtrT kernel_ptr)
{
block_threads = AgentPolicyT::BLOCK_THREADS;
items_per_thread = AgentPolicyT::ITEMS_PER_THREAD;
tile_size = block_threads * items_per_thread;
cudaError_t retval = MaxSmOccupancy(sm_occupancy, kernel_ptr, block_threads);
return retval;
}
};
/// Helper for dispatching into a policy chain
template <int PTX_VERSION, typename PolicyT, typename PrevPolicyT>
struct ChainedPolicy
{
/// The policy for the active compiler pass
typedef typename If<(CUB_PTX_ARCH < PTX_VERSION), typename PrevPolicyT::ActivePolicy, PolicyT>::Type ActivePolicy;
/// Specializes and dispatches op in accordance to the first policy in the chain of adequate PTX version
template <typename FunctorT>
CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Invoke(int ptx_version, FunctorT &op)
{
if (ptx_version < PTX_VERSION) {
return PrevPolicyT::Invoke(ptx_version, op);
}
return op.template Invoke<PolicyT>();
}
};
/// Helper for dispatching into a policy chain (end-of-chain specialization)
template <int PTX_VERSION, typename PolicyT>
struct ChainedPolicy<PTX_VERSION, PolicyT, PolicyT>
{
/// The policy for the active compiler pass
typedef PolicyT ActivePolicy;
/// Specializes and dispatches op in accordance to the first policy in the chain of adequate PTX version
template <typename FunctorT>
CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Invoke(int /*ptx_version*/, FunctorT &op) {
return op.template Invoke<PolicyT>();
}
};
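/*
 * Usage sketch (Policy200 / Policy350 are illustrative names): a two-entry
 * tuning chain. The oldest policy names itself as its own predecessor, which
 * terminates the recursion via the specialization above:
 *
 *     struct Policy200 : ChainedPolicy<200, Policy200, Policy200> { ... };
 *     struct Policy350 : ChainedPolicy<350, Policy350, Policy200> { ... };
 *
 *     // Policy350::Invoke(ptx_version, op) walks the chain and calls
 *     // op.Invoke<PolicyT>() for the newest policy whose PTX_VERSION
 *     // does not exceed ptx_version.
 */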
#endif // Do not document
/** @} */ // end group UtilMgmt
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external | rapidsai_public_repos/nvgraph/external/cub_semiring/util_macro.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Common C/C++ macro utilities
******************************************************************************/
#pragma once
#include "util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilModule
* @{
*/
#ifndef CUB_ALIGN
#if defined(_WIN32) || defined(_WIN64)
    /// Align struct (note: this MSVC path hard-codes 32-byte alignment and ignores the \p bytes argument)
#define CUB_ALIGN(bytes) __declspec(align(32))
#else
/// Align struct
#define CUB_ALIGN(bytes) __attribute__((aligned(bytes)))
#endif
#endif
#ifndef CUB_MAX
/// Select maximum(a, b)
#define CUB_MAX(a, b) (((b) > (a)) ? (b) : (a))
#endif
#ifndef CUB_MIN
/// Select minimum(a, b)
#define CUB_MIN(a, b) (((b) < (a)) ? (b) : (a))
#endif
#ifndef CUB_QUOTIENT_FLOOR
/// Quotient of x/y rounded down to nearest integer
#define CUB_QUOTIENT_FLOOR(x, y) ((x) / (y))
#endif
#ifndef CUB_QUOTIENT_CEILING
/// Quotient of x/y rounded up to nearest integer
#define CUB_QUOTIENT_CEILING(x, y) (((x) + (y) - 1) / (y))
#endif
#ifndef CUB_ROUND_UP_NEAREST
/// x rounded up to the nearest multiple of y
    #define CUB_ROUND_UP_NEAREST(x, y) ((((x) + (y) - 1) / (y)) * (y))
#endif
#ifndef CUB_ROUND_DOWN_NEAREST
/// x rounded down to the nearest multiple of y
    #define CUB_ROUND_DOWN_NEAREST(x, y) (((x) / (y)) * (y))
#endif
#ifndef CUB_STATIC_ASSERT
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
#define CUB_CAT_(a, b) a ## b
#define CUB_CAT(a, b) CUB_CAT_(a, b)
#endif // DOXYGEN_SHOULD_SKIP_THIS
/// Static assert
#define CUB_STATIC_ASSERT(cond, msg) typedef int CUB_CAT(cub_static_assert, __LINE__)[(cond) ? 1 : -1]
#endif
/** @} */ // end group UtilModule
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external | rapidsai_public_repos/nvgraph/external/cub_semiring/util_debug.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Error and event logging routines.
*
* The following macros definitions are supported:
* - \p CUB_LOG. Simple event messages are printed to \p stdout.
*/
#pragma once
#include <stdio.h>
#include "util_namespace.cuh"
#include "util_arch.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilMgmt
* @{
*/
/// CUB error reporting macro (prints error messages to stderr)
#if (defined(DEBUG) || defined(_DEBUG)) && !defined(CUB_STDERR)
#define CUB_STDERR
#endif
/**
* \brief %If \p CUB_STDERR is defined and \p error is not \p cudaSuccess, the corresponding error message is printed to \p stderr (or \p stdout in device code) along with the supplied source context.
*
* \return The CUDA error.
*/
__host__ __device__ __forceinline__ cudaError_t Debug(
cudaError_t error,
const char* filename,
int line)
{
(void)filename;
(void)line;
#ifdef CUB_STDERR
if (error)
{
#if (CUB_PTX_ARCH == 0)
fprintf(stderr, "CUDA error %d [%s, %d]: %s\n", error, filename, line, cudaGetErrorString(error));
fflush(stderr);
#elif (CUB_PTX_ARCH >= 200)
printf("CUDA error %d [block (%d,%d,%d) thread (%d,%d,%d), %s, %d]\n", error, blockIdx.z, blockIdx.y, blockIdx.x, threadIdx.z, threadIdx.y, threadIdx.x, filename, line);
#endif
}
#endif
return error;
}
/**
* \brief Debug macro
*/
#ifndef CubDebug
#define CubDebug(e) cub::Debug((cudaError_t) (e), __FILE__, __LINE__)
#endif
/**
* \brief Debug macro with exit
*/
#ifndef CubDebugExit
#define CubDebugExit(e) if (cub::Debug((cudaError_t) (e), __FILE__, __LINE__)) { exit(1); }
#endif
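// Illustrative usage (editorial sketch, not part of the original source): both
// macros evaluate to the CUDA error code, so they can wrap any runtime call.
// d_ptr, h_ptr, and bytes are hypothetical variables.
//
//     cudaError_t error = CubDebug(cudaMalloc(&d_ptr, bytes));               // logs to stderr when CUB_STDERR is defined
//     CubDebugExit(cudaMemcpy(d_ptr, h_ptr, bytes, cudaMemcpyHostToDevice)); // calls exit(1) on failure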
/**
* \brief Log macro for printf statements.
*/
#if !defined(_CubLog)
#if !(defined(__clang__) && defined(__CUDA__))
#if (CUB_PTX_ARCH == 0)
#define _CubLog(format, ...) printf(format,__VA_ARGS__);
#elif (CUB_PTX_ARCH >= 200)
#define _CubLog(format, ...) printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, blockIdx.z, blockIdx.y, blockIdx.x, threadIdx.z, threadIdx.y, threadIdx.x, __VA_ARGS__);
#endif
#else
// XXX shameless hack for clang around variadic printf...
    //     Compiles w/o supplying -std=c++11 but shows warnings,
    //     so we silence them :)
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wunnamed-type-template-args"
template <class... Args>
inline __host__ __device__ void va_printf(char const* format, Args const&... args)
{
#ifdef __CUDA_ARCH__
printf(format, blockIdx.z, blockIdx.y, blockIdx.x, threadIdx.z, threadIdx.y, threadIdx.x, args...);
#else
printf(format, args...);
#endif
}
#ifndef __CUDA_ARCH__
#define _CubLog(format, ...) va_printf(format,__VA_ARGS__);
#else
#define _CubLog(format, ...) va_printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, __VA_ARGS__);
#endif
#endif
#endif
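// Illustrative usage (editorial sketch, not part of the original source): in
// device code each message is prefixed with the calling thread's block/thread
// coordinates; on the host it prints as a plain printf. tile_idx and count are
// hypothetical local variables.
//
//     _CubLog("tile %d processed %d items\n", tile_idx, count);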
/** @} */ // end group UtilMgmt
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/constant_input_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../util_namespace.cuh"
#if (THRUST_VERSION >= 100700)
// This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A random-access input generator for dereferencing a sequence of homogeneous values
*
* \par Overview
 * - Read references to a ConstantInputIteratorT iterator always return the supplied constant
* of type \p ValueType.
* - Can be used with any data type.
* - Can be constructed, manipulated, dereferenced, and exchanged within and between host and device
* functions.
* - Compatible with Thrust API v1.7 or newer.
*
* \par Snippet
 * The code snippet below illustrates the use of \p ConstantInputIteratorT to
* dereference a sequence of homogeneous doubles.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/iterator/constant_input_iterator.cuh>
*
* cub::ConstantInputIterator<double> itr(5.0);
*
* printf("%f\n", itr[0]); // 5.0
* printf("%f\n", itr[1]); // 5.0
* printf("%f\n", itr[2]); // 5.0
* printf("%f\n", itr[50]); // 5.0
*
* \endcode
*
* \tparam ValueType The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
typename ValueType,
typename OffsetT = ptrdiff_t>
class ConstantInputIterator
{
public:
// Required iterator traits
typedef ConstantInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef ValueType value_type; ///< The type of the element the iterator can point to
typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef ValueType reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::any_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
ValueType val;
OffsetT offset;
#ifdef _WIN32
OffsetT pad[CUB_MAX(1, (16 / sizeof(OffsetT) - 1))]; // Workaround for win32 parameter-passing bug (ulonglong2 argmin DeviceReduce)
#endif
public:
/// Constructor
__host__ __device__ __forceinline__ ConstantInputIterator(
ValueType val, ///< Starting value for the iterator instance to report
OffsetT offset = 0) ///< Base offset
:
val(val),
offset(offset)
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
offset++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
offset++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
return val;
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(val, offset + n);
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
offset += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(val, offset - n);
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
offset -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return offset - other.offset;
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance /*n*/) const
{
return val;
}
/// Structure dereference
__host__ __device__ __forceinline__ pointer operator->()
{
return &val;
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return (offset == rhs.offset) && ((val == rhs.val));
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
        return (offset != rhs.offset) || (val != rhs.val);
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& itr)
{
os << "[" << itr.val << "," << itr.offset << "]";
return os;
}
};
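/*
 * Illustrative sketch (editorial addition): because the iterator is
 * random-access and device-compatible, it can feed device-wide algorithms
 * directly. The two-phase cub::DeviceReduce::Sum calls below assume the
 * standard CUB interface; d_out and num_items are hypothetical.
 *
 *     cub::ConstantInputIterator<int> ones(1);
 *     void   *d_temp_storage     = NULL;
 *     size_t  temp_storage_bytes = 0;
 *     cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, ones, d_out, num_items);
 *     cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *     cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, ones, d_out, num_items);
 *     // *d_out == num_items
 */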
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/tex_ref_input_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../util_device.cuh"
#include "../util_debug.cuh"
#include "../util_namespace.cuh"
#if (CUDA_VERSION >= 5050) || defined(DOXYGEN_ACTIVE) // This iterator is compatible with CUDA 5.5 and newer
#if (THRUST_VERSION >= 100700) // This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Static file-scope Tesla/Fermi-style texture references
*****************************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
// Anonymous namespace
namespace {
/// Global texture reference specialized by type
template <typename T>
struct IteratorTexRef
{
/// And by unique ID
template <int UNIQUE_ID>
struct TexId
{
// Largest texture word we can use in device
typedef typename UnitWord<T>::DeviceWord DeviceWord;
typedef typename UnitWord<T>::TextureWord TextureWord;
// Number of texture words per T
enum {
DEVICE_MULTIPLE = sizeof(T) / sizeof(DeviceWord),
TEXTURE_MULTIPLE = sizeof(T) / sizeof(TextureWord)
};
// Texture reference type
typedef texture<TextureWord> TexRef;
// Texture reference
static TexRef ref;
/// Bind texture
static cudaError_t BindTexture(void *d_in, size_t &offset)
{
if (d_in)
{
cudaChannelFormatDesc tex_desc = cudaCreateChannelDesc<TextureWord>();
ref.channelDesc = tex_desc;
return (CubDebug(cudaBindTexture(&offset, ref, d_in)));
}
return cudaSuccess;
}
/// Unbind texture
static cudaError_t UnbindTexture()
{
return CubDebug(cudaUnbindTexture(ref));
}
/// Fetch element
template <typename Distance>
static __device__ __forceinline__ T Fetch(Distance tex_offset)
{
DeviceWord temp[DEVICE_MULTIPLE];
TextureWord *words = reinterpret_cast<TextureWord*>(temp);
#pragma unroll
for (int i = 0; i < TEXTURE_MULTIPLE; ++i)
{
words[i] = tex1Dfetch(ref, (tex_offset * TEXTURE_MULTIPLE) + i);
}
return reinterpret_cast<T&>(temp);
}
};
};
// Texture reference definitions
template <typename T>
template <int UNIQUE_ID>
typename IteratorTexRef<T>::template TexId<UNIQUE_ID>::TexRef IteratorTexRef<T>::template TexId<UNIQUE_ID>::ref = 0;
} // Anonymous namespace
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A random-access input wrapper for dereferencing array values through texture cache. Uses older Tesla/Fermi-style texture references.
*
* \par Overview
 * - TexRefInputIteratorT wraps a native device pointer of type <tt>ValueType*</tt>. References
* to elements are to be loaded through texture cache.
* - Can be used to load any data type from memory through texture cache.
* - Can be manipulated and exchanged within and between host and device
* functions, can only be constructed within host functions, and can only be
* dereferenced within device functions.
* - The \p UNIQUE_ID template parameter is used to statically name the underlying texture
 *   reference. Only one TexRefInputIteratorT instance can be bound at any given time for a
* specific combination of (1) data type \p T, (2) \p UNIQUE_ID, (3) host
* thread, and (4) compilation .o unit.
 * - With regard to nested/dynamic parallelism, TexRefInputIteratorT iterators may only be
* created by the host thread and used by a top-level kernel (i.e. the one which is launched
* from the host).
* - Compatible with Thrust API v1.7 or newer.
* - Compatible with CUDA toolkit v5.5 or newer.
*
* \par Snippet
 * The code snippet below illustrates the use of \p TexRefInputIteratorT to
* dereference a device array of doubles through texture cache.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/iterator/tex_ref_input_iterator.cuh>
*
* // Declare, allocate, and initialize a device array
* int num_items; // e.g., 7
* double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0]
*
* // Create an iterator wrapper
* cub::TexRefInputIterator<double, __LINE__> itr;
* itr.BindTexture(d_in, sizeof(double) * num_items);
* ...
*
* // Within device code:
* printf("%f\n", itr[0]); // 8.0
* printf("%f\n", itr[1]); // 6.0
* printf("%f\n", itr[6]); // 9.0
*
* ...
* itr.UnbindTexture();
*
* \endcode
*
* \tparam T The value type of this iterator
* \tparam UNIQUE_ID A globally-unique identifier (within the compilation unit) to name the underlying texture reference
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
typename T,
int UNIQUE_ID,
typename OffsetT = ptrdiff_t>
class TexRefInputIterator
{
public:
// Required iterator traits
typedef TexRefInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef T value_type; ///< The type of the element the iterator can point to
typedef T* pointer; ///< The type of a pointer to an element the iterator can point to
typedef T reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::device_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
T* ptr;
difference_type tex_offset;
// Texture reference wrapper (old Tesla/Fermi-style textures)
typedef typename IteratorTexRef<T>::template TexId<UNIQUE_ID> TexId;
public:
/// Constructor
__host__ __device__ __forceinline__ TexRefInputIterator()
:
ptr(NULL),
tex_offset(0)
{}
/// Use this iterator to bind \p ptr with a texture reference
template <typename QualifiedT>
cudaError_t BindTexture(
QualifiedT *ptr, ///< Native pointer to wrap that is aligned to cudaDeviceProp::textureAlignment
size_t bytes = size_t(-1), ///< Number of bytes in the range
size_t tex_offset = 0) ///< OffsetT (in items) from \p ptr denoting the position of the iterator
{
this->ptr = const_cast<typename RemoveQualifiers<QualifiedT>::Type *>(ptr);
size_t offset;
cudaError_t retval = TexId::BindTexture(this->ptr + tex_offset, offset);
this->tex_offset = (difference_type) (offset / sizeof(QualifiedT));
return retval;
}
/// Unbind this iterator from its texture reference
cudaError_t UnbindTexture()
{
return TexId::UnbindTexture();
}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
tex_offset++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
tex_offset++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
#if (CUB_PTX_ARCH == 0)
// Simply dereference the pointer on the host
return ptr[tex_offset];
#else
// Use the texture reference
return TexId::Fetch(tex_offset);
#endif
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval;
retval.ptr = ptr;
retval.tex_offset = tex_offset + n;
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
tex_offset += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval;
retval.ptr = ptr;
retval.tex_offset = tex_offset - n;
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
tex_offset -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return tex_offset - other.tex_offset;
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
self_type offset = (*this) + n;
return *offset;
}
/// Structure dereference
__host__ __device__ __forceinline__ pointer operator->()
{
return &(*(*this));
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return ((ptr == rhs.ptr) && (tex_offset == rhs.tex_offset));
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return ((ptr != rhs.ptr) || (tex_offset != rhs.tex_offset));
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& itr)
{
return os;
}
};
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
#endif // CUDA_VERSION
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/transform_input_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../util_device.cuh"
#include "../util_namespace.cuh"
#if (THRUST_VERSION >= 100700)
// This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A random-access input wrapper for transforming dereferenced values.
*
* \par Overview
 * - TransformInputIteratorT wraps a unary conversion functor of type \p
* ConversionOp and a random-access input iterator of type <tt>InputIteratorT</tt>,
* using the former to produce references of type \p ValueType from the latter.
* - Can be used with any data type.
* - Can be constructed, manipulated, and exchanged within and between host and device
* functions. Wrapped host memory can only be dereferenced on the host, and wrapped
* device memory can only be dereferenced on the device.
* - Compatible with Thrust API v1.7 or newer.
*
* \par Snippet
 * The code snippet below illustrates the use of \p TransformInputIteratorT to
* dereference an array of integers, tripling the values and converting them to doubles.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/iterator/transform_input_iterator.cuh>
*
* // Functor for tripling integer values and converting to doubles
* struct TripleDoubler
* {
* __host__ __device__ __forceinline__
* double operator()(const int &a) const {
* return double(a * 3);
* }
* };
*
* // Declare, allocate, and initialize a device array
* int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* TripleDoubler conversion_op;
*
* // Create an iterator wrapper
* cub::TransformInputIterator<double, TripleDoubler, int*> itr(d_in, conversion_op);
*
* // Within device code:
* printf("%f\n", itr[0]); // 24.0
* printf("%f\n", itr[1]); // 18.0
* printf("%f\n", itr[6]); // 27.0
*
* \endcode
*
* \tparam ValueType The value type of this iterator
* \tparam ConversionOp Unary functor type for mapping objects of type \p InputType to type \p ValueType. Must have member <tt>ValueType operator()(const InputType &datum)</tt>.
* \tparam InputIteratorT The type of the wrapped input iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*
*/
template <
typename ValueType,
typename ConversionOp,
typename InputIteratorT,
typename OffsetT = ptrdiff_t>
class TransformInputIterator
{
public:
// Required iterator traits
typedef TransformInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef ValueType value_type; ///< The type of the element the iterator can point to
typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef ValueType reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::any_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
ConversionOp conversion_op;
InputIteratorT input_itr;
public:
/// Constructor
__host__ __device__ __forceinline__ TransformInputIterator(
InputIteratorT input_itr, ///< Input iterator to wrap
ConversionOp conversion_op) ///< Conversion functor to wrap
:
conversion_op(conversion_op),
input_itr(input_itr)
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
input_itr++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
input_itr++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
return conversion_op(*input_itr);
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(input_itr + n, conversion_op);
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
input_itr += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(input_itr - n, conversion_op);
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
input_itr -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return input_itr - other.input_itr;
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
return conversion_op(input_itr[n]);
}
/// Structure dereference
__host__ __device__ __forceinline__ pointer operator->()
{
return &conversion_op(*input_itr);
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return (input_itr == rhs.input_itr);
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return (input_itr != rhs.input_itr);
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& itr)
{
return os;
}
};
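/*
 * Illustrative sketch (editorial addition): iterator wrappers compose, so a
 * TransformInputIterator can post-process values loaded through a
 * CacheModifiedInputIterator (defined elsewhere in this library). Square and
 * d_in are hypothetical.
 *
 *     struct Square { __host__ __device__ int operator()(const int &x) const { return x * x; } };
 *     typedef cub::CacheModifiedInputIterator<cub::LOAD_LDG, int> LdgIter;
 *     cub::TransformInputIterator<int, Square, LdgIter> itr(LdgIter(d_in), Square());
 *     // Within device code: itr[i] == d_in[i] * d_in[i], loaded via LDG
 */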
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/tex_obj_input_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../util_device.cuh"
#include "../util_debug.cuh"
#include "../util_namespace.cuh"
#if (THRUST_VERSION >= 100700)
// This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A random-access input wrapper for dereferencing array values through texture cache. Uses newer Kepler-style texture objects.
*
* \par Overview
 * - TexObjInputIteratorT wraps a native device pointer of type <tt>ValueType*</tt>. References
* to elements are to be loaded through texture cache.
* - Can be used to load any data type from memory through texture cache.
* - Can be manipulated and exchanged within and between host and device
* functions, can only be constructed within host functions, and can only be
* dereferenced within device functions.
 * - With regard to nested/dynamic parallelism, TexObjInputIteratorT iterators may only be
* created by the host thread, but can be used by any descendant kernel.
* - Compatible with Thrust API v1.7 or newer.
*
* \par Snippet
 * The code snippet below illustrates the use of \p TexObjInputIteratorT to
* dereference a device array of doubles through texture cache.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/iterator/tex_obj_input_iterator.cuh>
*
* // Declare, allocate, and initialize a device array
* int num_items; // e.g., 7
* double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0]
*
* // Create an iterator wrapper
* cub::TexObjInputIterator<double> itr;
* itr.BindTexture(d_in, sizeof(double) * num_items);
* ...
*
* // Within device code:
* printf("%f\n", itr[0]); // 8.0
* printf("%f\n", itr[1]); // 6.0
* printf("%f\n", itr[6]); // 9.0
*
* ...
* itr.UnbindTexture();
*
* \endcode
*
* \tparam T The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
typename T,
typename OffsetT = ptrdiff_t>
class TexObjInputIterator
{
public:
// Required iterator traits
typedef TexObjInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef T value_type; ///< The type of the element the iterator can point to
typedef T* pointer; ///< The type of a pointer to an element the iterator can point to
typedef T reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::device_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
// Largest texture word we can use in device
typedef typename UnitWord<T>::TextureWord TextureWord;
// Number of texture words per T
enum {
TEXTURE_MULTIPLE = sizeof(T) / sizeof(TextureWord)
};
private:
T* ptr;
difference_type tex_offset;
cudaTextureObject_t tex_obj;
public:
/// Constructor
__host__ __device__ __forceinline__ TexObjInputIterator()
:
ptr(NULL),
tex_offset(0),
tex_obj(0)
{}
/// Use this iterator to bind \p ptr with a texture reference
template <typename QualifiedT>
cudaError_t BindTexture(
QualifiedT *ptr, ///< Native pointer to wrap that is aligned to cudaDeviceProp::textureAlignment
size_t bytes = size_t(-1), ///< Number of bytes in the range
size_t tex_offset = 0) ///< OffsetT (in items) from \p ptr denoting the position of the iterator
{
this->ptr = const_cast<typename RemoveQualifiers<QualifiedT>::Type *>(ptr);
this->tex_offset = tex_offset;
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<TextureWord>();
cudaResourceDesc res_desc;
cudaTextureDesc tex_desc;
memset(&res_desc, 0, sizeof(cudaResourceDesc));
memset(&tex_desc, 0, sizeof(cudaTextureDesc));
res_desc.resType = cudaResourceTypeLinear;
res_desc.res.linear.devPtr = this->ptr;
res_desc.res.linear.desc = channel_desc;
res_desc.res.linear.sizeInBytes = bytes;
tex_desc.readMode = cudaReadModeElementType;
return cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL);
}
/// Unbind this iterator from its texture reference
cudaError_t UnbindTexture()
{
return cudaDestroyTextureObject(tex_obj);
}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
tex_offset++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
tex_offset++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
#if (CUB_PTX_ARCH == 0)
// Simply dereference the pointer on the host
return ptr[tex_offset];
#else
// Move array of uninitialized words, then alias and assign to return value
TextureWord words[TEXTURE_MULTIPLE];
#pragma unroll
for (int i = 0; i < TEXTURE_MULTIPLE; ++i)
{
words[i] = tex1Dfetch<TextureWord>(
tex_obj,
(tex_offset * TEXTURE_MULTIPLE) + i);
}
// Load from words
return *reinterpret_cast<T*>(words);
#endif
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval;
retval.ptr = ptr;
retval.tex_obj = tex_obj;
retval.tex_offset = tex_offset + n;
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
tex_offset += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval;
retval.ptr = ptr;
retval.tex_obj = tex_obj;
retval.tex_offset = tex_offset - n;
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
tex_offset -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return tex_offset - other.tex_offset;
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
self_type offset = (*this) + n;
return *offset;
}
/// Structure dereference
__host__ __device__ __forceinline__ pointer operator->()
{
return &(*(*this));
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return ((ptr == rhs.ptr) && (tex_offset == rhs.tex_offset) && (tex_obj == rhs.tex_obj));
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return ((ptr != rhs.ptr) || (tex_offset != rhs.tex_offset) || (tex_obj != rhs.tex_obj));
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& itr)
{
return os;
}
};
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/cache_modified_input_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../util_device.cuh"
#include "../util_namespace.cuh"
#if (THRUST_VERSION >= 100700)
// This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A random-access input wrapper for dereferencing array values using a PTX cache load modifier.
*
* \par Overview
 * - CacheModifiedInputIteratorT is a random-access input iterator that wraps a native
* device pointer of type <tt>ValueType*</tt>. \p ValueType references are
* made by reading \p ValueType values through loads modified by \p MODIFIER.
* - Can be used to load any data type from memory using PTX cache load modifiers (e.g., "LOAD_LDG",
* "LOAD_CG", "LOAD_CA", "LOAD_CS", "LOAD_CV", etc.).
* - Can be constructed, manipulated, and exchanged within and between host and device
* functions, but can only be dereferenced within device functions.
* - Compatible with Thrust API v1.7 or newer.
*
* \par Snippet
 * The code snippet below illustrates the use of \p CacheModifiedInputIteratorT to
 * dereference a device array of doubles using the "ldg" PTX load modifier
* (i.e., load values through texture cache).
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/iterator/cache_modified_input_iterator.cuh>
*
* // Declare, allocate, and initialize a device array
* double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0]
*
* // Create an iterator wrapper
* cub::CacheModifiedInputIterator<cub::LOAD_LDG, double> itr(d_in);
*
* // Within device code:
* printf("%f\n", itr[0]); // 8.0
* printf("%f\n", itr[1]); // 6.0
* printf("%f\n", itr[6]); // 9.0
*
* \endcode
*
* \tparam CacheLoadModifier The cub::CacheLoadModifier to use when accessing data
* \tparam ValueType The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
CacheLoadModifier MODIFIER,
typename ValueType,
typename OffsetT = ptrdiff_t>
class CacheModifiedInputIterator
{
public:
// Required iterator traits
typedef CacheModifiedInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef ValueType value_type; ///< The type of the element the iterator can point to
typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef ValueType reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::device_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
public:
/// Wrapped native pointer
ValueType* ptr;
/// Constructor
template <typename QualifiedValueType>
__host__ __device__ __forceinline__ CacheModifiedInputIterator(
QualifiedValueType* ptr) ///< Native pointer to wrap
:
ptr(const_cast<typename RemoveQualifiers<QualifiedValueType>::Type *>(ptr))
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
ptr++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
ptr++;
return *this;
}
/// Indirection
__device__ __forceinline__ reference operator*() const
{
return ThreadLoad<MODIFIER>(ptr);
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(ptr + n);
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
ptr += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(ptr - n);
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
ptr -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return ptr - other.ptr;
}
/// Array subscript
template <typename Distance>
__device__ __forceinline__ reference operator[](Distance n) const
{
return ThreadLoad<MODIFIER>(ptr + n);
}
/// Structure dereference
__device__ __forceinline__ pointer operator->()
{
return &ThreadLoad<MODIFIER>(ptr);
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return (ptr == rhs.ptr);
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return (ptr != rhs.ptr);
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& /*itr*/)
{
return os;
}
};
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/arg_index_input_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../util_device.cuh"
#include "../util_namespace.cuh"
#include <thrust/version.h>
#if (THRUST_VERSION >= 100700)
// This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A random-access input wrapper for pairing dereferenced values with their corresponding indices (forming \p KeyValuePair tuples).
*
* \par Overview
 * - ArgIndexInputIteratorT wraps a random access input iterator \p itr of type \p InputIteratorT.
 *   Dereferencing an ArgIndexInputIteratorT at offset \p i produces a \p KeyValuePair value whose
* \p key field is \p i and whose \p value field is <tt>itr[i]</tt>.
* - Can be used with any data type.
* - Can be constructed, manipulated, and exchanged within and between host and device
* functions. Wrapped host memory can only be dereferenced on the host, and wrapped
* device memory can only be dereferenced on the device.
* - Compatible with Thrust API v1.7 or newer.
*
* \par Snippet
 * The code snippet below illustrates the use of \p ArgIndexInputIteratorT to
* dereference an array of doubles
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/iterator/arg_index_input_iterator.cuh>
*
* // Declare, allocate, and initialize a device array
* double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0]
*
* // Create an iterator wrapper
* cub::ArgIndexInputIterator<double*> itr(d_in);
*
* // Within device code:
* typedef typename cub::ArgIndexInputIterator<double*>::value_type Tuple;
 * Tuple item_offset_pair = *itr;
* printf("%f @ %d\n",
* item_offset_pair.value,
* item_offset_pair.key); // 8.0 @ 0
*
* itr = itr + 6;
 * item_offset_pair = *itr;
* printf("%f @ %d\n",
* item_offset_pair.value,
* item_offset_pair.key); // 9.0 @ 6
*
* \endcode
*
* \tparam InputIteratorT The value type of the wrapped input iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
* \tparam OutputValueT The paired value type of the <offset,value> tuple (Default: value type of input iterator)
*/
template <
typename InputIteratorT,
typename OffsetT = ptrdiff_t,
typename OutputValueT = typename std::iterator_traits<InputIteratorT>::value_type>
class ArgIndexInputIterator
{
public:
// Required iterator traits
typedef ArgIndexInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef KeyValuePair<difference_type, OutputValueT> value_type; ///< The type of the element the iterator can point to
typedef value_type* pointer; ///< The type of a pointer to an element the iterator can point to
typedef value_type reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::any_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
InputIteratorT itr;
difference_type offset;
public:
/// Constructor
__host__ __device__ __forceinline__ ArgIndexInputIterator(
InputIteratorT itr, ///< Input iterator to wrap
difference_type offset = 0) ///< OffsetT (in items) from \p itr denoting the position of the iterator
:
itr(itr),
offset(offset)
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
offset++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
offset++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
value_type retval;
retval.value = itr[offset];
retval.key = offset;
return retval;
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(itr, offset + n);
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
offset += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(itr, offset - n);
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
offset -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return offset - other.offset;
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
self_type offset = (*this) + n;
return *offset;
}
/// Structure dereference
__host__ __device__ __forceinline__ pointer operator->()
{
return &(*(*this));
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return ((itr == rhs.itr) && (offset == rhs.offset));
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return ((itr != rhs.itr) || (offset != rhs.offset));
}
    /// Normalize: fold the current offset into the wrapped iterator and reset the offset to zero
__host__ __device__ __forceinline__ void normalize()
{
itr += offset;
offset = 0;
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& /*itr*/)
{
return os;
}
};
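/*
 * Illustrative sketch (editorial addition): when wrapping a host pointer the
 * iterator can also be exercised on the host. h_in is hypothetical.
 *
 *     double h_in[3] = {8.0, 6.0, 7.0};
 *     cub::ArgIndexInputIterator<double*> itr(h_in);
 *     cub::ArgIndexInputIterator<double*>::value_type pair = itr[2];
 *     // pair.key == 2, pair.value == 7.0
 *     itr += 2;
 *     itr.normalize();   // itr now wraps h_in + 2 with offset 0
 */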
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/cache_modified_output_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../util_device.cuh"
#include "../util_namespace.cuh"
#if (THRUST_VERSION >= 100700)
// This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A random-access output wrapper for storing array values using a PTX cache-modifier.
*
* \par Overview
* - CacheModifiedOutputIterator is a random-access output iterator that wraps a native
* device pointer of type <tt>ValueType*</tt>. \p ValueType references are
* made by writing \p ValueType values through stores modified by \p MODIFIER.
* - Can be used to store any data type to memory using PTX cache store modifiers (e.g., "STORE_WB",
* "STORE_CG", "STORE_CS", "STORE_WT", etc.).
* - Can be constructed, manipulated, and exchanged within and between host and device
* functions, but can only be dereferenced within device functions.
* - Compatible with Thrust API v1.7 or newer.
*
* \par Snippet
* The code snippet below illustrates the use of \p CacheModifiedOutputIterator to
 * dereference a device array of doubles using the "wt" PTX store modifier
* (i.e., write-through to system memory).
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/iterator/cache_modified_output_iterator.cuh>
*
* // Declare, allocate, and initialize a device array
* double *d_out; // e.g., [, , , , , , ]
*
* // Create an iterator wrapper
* cub::CacheModifiedOutputIterator<cub::STORE_WT, double> itr(d_out);
*
* // Within device code:
* itr[0] = 8.0;
* itr[1] = 66.0;
* itr[55] = 24.0;
*
* \endcode
*
* \par Usage Considerations
* - Can only be dereferenced within device code
*
* \tparam CacheStoreModifier The cub::CacheStoreModifier to use when accessing data
* \tparam ValueType The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
CacheStoreModifier MODIFIER,
typename ValueType,
typename OffsetT = ptrdiff_t>
class CacheModifiedOutputIterator
{
private:
// Proxy object
struct Reference
{
ValueType* ptr;
/// Constructor
__host__ __device__ __forceinline__ Reference(ValueType* ptr) : ptr(ptr) {}
/// Assignment
__device__ __forceinline__ ValueType operator =(ValueType val)
{
ThreadStore<MODIFIER>(ptr, val);
return val;
}
};
public:
// Required iterator traits
typedef CacheModifiedOutputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef void value_type; ///< The type of the element the iterator can point to
typedef void pointer; ///< The type of a pointer to an element the iterator can point to
typedef Reference reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::device_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
ValueType* ptr;
public:
/// Constructor
template <typename QualifiedValueType>
__host__ __device__ __forceinline__ CacheModifiedOutputIterator(
QualifiedValueType* ptr) ///< Native pointer to wrap
:
ptr(const_cast<typename RemoveQualifiers<QualifiedValueType>::Type *>(ptr))
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
ptr++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
ptr++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
return Reference(ptr);
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(ptr + n);
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
ptr += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(ptr - n);
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
ptr -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return ptr - other.ptr;
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
return Reference(ptr + n);
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return (ptr == rhs.ptr);
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return (ptr != rhs.ptr);
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& itr)
{
return os;
}
};
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/discard_output_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../util_namespace.cuh"
#include "../util_macro.cuh"
#if (THRUST_VERSION >= 100700)
// This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A discard iterator
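 *
 * \par Snippet
 * The snippet below is an illustrative sketch (mirroring the snippet conventions
 * of the other iterators in this directory) of using \p DiscardOutputIterator to
 * absorb writes whose values are not needed:
 * \par
 * \code
 * #include <cub/cub.cuh>   // or equivalently <cub/iterator/discard_output_iterator.cuh>
 *
 * cub::DiscardOutputIterator<> itr;
 *
 * itr[0]  = 8.0;    // value is discarded
 * itr[66] = 9.0;    // value is discarded
 *
 * \endcode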
*/
template <typename OffsetT = ptrdiff_t>
class DiscardOutputIterator
{
public:
// Required iterator traits
typedef DiscardOutputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef void value_type; ///< The type of the element the iterator can point to
typedef void pointer; ///< The type of a pointer to an element the iterator can point to
typedef void reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::any_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
OffsetT offset;
#if defined(_WIN32) && !defined(_WIN64)
// Workaround for win32 parameter-passing bug (ulonglong2 argmin DeviceReduce)
OffsetT pad[CUB_MAX(1, (16 / sizeof(OffsetT) - 1))];
#endif
public:
/// Constructor
__host__ __device__ __forceinline__ DiscardOutputIterator(
OffsetT offset = 0) ///< Base offset
:
offset(offset)
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
offset++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
offset++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ self_type& operator*()
{
// return self reference, which can be assigned to anything
return *this;
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(offset + n);
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
offset += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(offset - n);
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
offset -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return offset - other.offset;
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator[](Distance n)
{
// return self reference, which can be assigned to anything
return *this;
}
/// Structure dereference
__host__ __device__ __forceinline__ pointer operator->()
{
return;
}
/// Assignment to self (no-op)
__host__ __device__ __forceinline__ void operator=(self_type const& other)
{
offset = other.offset;
}
/// Assignment to anything else (no-op)
template<typename T>
__host__ __device__ __forceinline__ void operator=(T const&)
{}
/// Cast to void* operator
__host__ __device__ __forceinline__ operator void*() const { return NULL; }
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return (offset == rhs.offset);
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return (offset != rhs.offset);
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& itr)
{
os << "[" << itr.offset << "]";
return os;
}
};
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/iterator/counting_input_iterator.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Random-access iterator types
*/
#pragma once
#include <iterator>
#include <iostream>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../util_device.cuh"
#include "../util_namespace.cuh"
#if (THRUST_VERSION >= 100700)
// This iterator is compatible with Thrust API 1.7 and newer
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/iterator_traits.h>
#endif // THRUST_VERSION
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIterator
* @{
*/
/**
* \brief A random-access input generator for dereferencing a sequence of incrementing integer values.
*
* \par Overview
 * - After initializing a CountingInputIterator to a certain integer \p base, read references
* at \p offset will return the value \p base + \p offset.
* - Can be constructed, manipulated, dereferenced, and exchanged within and between host and device
* functions.
* - Compatible with Thrust API v1.7 or newer.
*
* \par Snippet
 * The code snippet below illustrates the use of \p CountingInputIterator to
* dereference a sequence of incrementing integers.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/iterator/counting_input_iterator.cuh>
*
* cub::CountingInputIterator<int> itr(5);
*
* printf("%d\n", itr[0]); // 5
* printf("%d\n", itr[1]); // 6
* printf("%d\n", itr[2]); // 7
* printf("%d\n", itr[50]); // 55
*
* \endcode
*
* \tparam ValueType The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
typename ValueType,
typename OffsetT = ptrdiff_t>
class CountingInputIterator
{
public:
// Required iterator traits
typedef CountingInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef ValueType value_type; ///< The type of the element the iterator can point to
typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef ValueType reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef typename thrust::detail::iterator_facade_category<
thrust::any_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
ValueType val;
public:
/// Constructor
__host__ __device__ __forceinline__ CountingInputIterator(
const ValueType &val) ///< Starting value for the iterator instance to report
:
val(val)
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
val++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
val++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
return val;
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(val + (ValueType) n);
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
val += (ValueType) n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(val - (ValueType) n);
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
val -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return (difference_type) (val - other.val);
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
return val + (ValueType) n;
}
/// Structure dereference
__host__ __device__ __forceinline__ pointer operator->()
{
return &val;
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return (val == rhs.val);
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return (val != rhs.val);
}
/// ostream operator
friend std::ostream& operator<<(std::ostream& os, const self_type& itr)
{
os << "[" << itr.val << "]";
return os;
}
};
/** @} */ // end group UtilIterator
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_spmv_orig.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV.
*/
#pragma once
#include <iterator>
#include "../util_type.cuh"
#include "../block/block_reduce.cuh"
#include "../block/block_scan.cuh"
#include "../block/block_exchange.cuh"
#include "../thread/thread_search.cuh"
#include "../thread/thread_operators.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../iterator/counting_input_iterator.cuh"
#include "../iterator/tex_ref_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy
******************************************************************************/
/**
* Parameterizable tuning policy type for AgentSpmv
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
CacheLoadModifier _ROW_OFFSETS_SEARCH_LOAD_MODIFIER, ///< Cache load modifier for reading CSR row-offsets during search
CacheLoadModifier _ROW_OFFSETS_LOAD_MODIFIER, ///< Cache load modifier for reading CSR row-offsets
CacheLoadModifier _COLUMN_INDICES_LOAD_MODIFIER, ///< Cache load modifier for reading CSR column-indices
CacheLoadModifier _VALUES_LOAD_MODIFIER, ///< Cache load modifier for reading CSR values
CacheLoadModifier _VECTOR_VALUES_LOAD_MODIFIER, ///< Cache load modifier for reading vector values
bool _DIRECT_LOAD_NONZEROS, ///< Whether to load nonzeros directly from global during sequential merging (vs. pre-staged through shared memory)
BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use
struct AgentSpmvPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
        DIRECT_LOAD_NONZEROS = _DIRECT_LOAD_NONZEROS, ///< Whether to load nonzeros directly from global during sequential merging (vs. pre-staged through shared memory)
};
static const CacheLoadModifier ROW_OFFSETS_SEARCH_LOAD_MODIFIER = _ROW_OFFSETS_SEARCH_LOAD_MODIFIER; ///< Cache load modifier for reading CSR row-offsets
static const CacheLoadModifier ROW_OFFSETS_LOAD_MODIFIER = _ROW_OFFSETS_LOAD_MODIFIER; ///< Cache load modifier for reading CSR row-offsets
static const CacheLoadModifier COLUMN_INDICES_LOAD_MODIFIER = _COLUMN_INDICES_LOAD_MODIFIER; ///< Cache load modifier for reading CSR column-indices
static const CacheLoadModifier VALUES_LOAD_MODIFIER = _VALUES_LOAD_MODIFIER; ///< Cache load modifier for reading CSR values
static const CacheLoadModifier VECTOR_VALUES_LOAD_MODIFIER = _VECTOR_VALUES_LOAD_MODIFIER; ///< Cache load modifier for reading vector values
static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use
};
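// Illustrative instantiation (a sketch for exposition only; the parameter values
// below are assumptions, not tuned settings shipped with this library):
//
//     typedef AgentSpmvPolicy<
//         128,                        // _BLOCK_THREADS
//         7,                          // _ITEMS_PER_THREAD
//         LOAD_LDG,                   // _ROW_OFFSETS_SEARCH_LOAD_MODIFIER
//         LOAD_LDG,                   // _ROW_OFFSETS_LOAD_MODIFIER
//         LOAD_LDG,                   // _COLUMN_INDICES_LOAD_MODIFIER
//         LOAD_LDG,                   // _VALUES_LOAD_MODIFIER
//         LOAD_LDG,                   // _VECTOR_VALUES_LOAD_MODIFIER
//         false,                      // _DIRECT_LOAD_NONZEROS (pre-stage nonzeros through smem)
//         BLOCK_SCAN_WARP_SCANS>      // _SCAN_ALGORITHM
//         ExampleSpmvPolicyT;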
/******************************************************************************
* Thread block abstractions
******************************************************************************/
template <
typename ValueT, ///< Matrix and vector value type
typename OffsetT> ///< Signed integer type for sequence offsets
struct SpmvParams
{
const ValueT* d_values; ///< Pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix <b>A</b>.
const OffsetT* d_row_end_offsets; ///< Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values
    const OffsetT* d_column_indices; ///< Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix <b>A</b>. (Indices are zero-based.)
const ValueT* d_vector_x; ///< Pointer to the array of \p num_cols values corresponding to the dense input vector <em>x</em>
ValueT* d_vector_y; ///< Pointer to the array of \p num_rows values corresponding to the dense output vector <em>y</em>
int num_rows; ///< Number of rows of matrix <b>A</b>.
int num_cols; ///< Number of columns of matrix <b>A</b>.
int num_nonzeros; ///< Number of nonzero elements of matrix <b>A</b>.
ValueT alpha; ///< Alpha multiplicand
ValueT beta; ///< Beta addend-multiplicand
TexRefInputIterator<ValueT, 66778899, OffsetT> t_vector_x;
};
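// Illustrative host-side initialization (a minimal sketch; the device pointers and
// problem sizes are assumed to be supplied by the caller):
//
//     SpmvParams<float, int> params;
//     params.d_values          = d_values;           // CSR nonzero values
//     params.d_row_end_offsets = d_row_offsets + 1;  // end-offsets skip the leading zero
//     params.d_column_indices  = d_column_indices;   // CSR column indices
//     params.d_vector_x        = d_x;                // dense input vector
//     params.d_vector_y        = d_y;                // dense output vector
//     params.num_rows          = num_rows;
//     params.num_cols          = num_cols;
//     params.num_nonzeros      = num_nonzeros;
//     params.alpha             = 1.0f;
//     params.beta              = 0.0f;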
/**
* \brief AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV.
*/
template <
typename AgentSpmvPolicyT, ///< Parameterized AgentSpmvPolicy tuning policy type
typename ValueT, ///< Matrix and vector value type
typename OffsetT, ///< Signed integer type for sequence offsets
typename SemiringT, ///< Semiring type
    bool HAS_ALPHA, ///< Whether the input parameter \p alpha is not 1 (i.e., the dot products must be scaled by \p alpha)
    bool HAS_BETA, ///< Whether the input parameter \p beta is not 0 (i.e., a \p beta-scaled \p y addend must be applied)
int PTX_ARCH = CUB_PTX_ARCH> ///< PTX compute capability
struct AgentSpmv
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
/// Constants
enum
{
BLOCK_THREADS = AgentSpmvPolicyT::BLOCK_THREADS,
ITEMS_PER_THREAD = AgentSpmvPolicyT::ITEMS_PER_THREAD,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
};
/// 2D merge path coordinate type
typedef typename CubVector<OffsetT, 2>::Type CoordinateT;
/// Input iterator wrapper types (for applying cache modifiers)
typedef CacheModifiedInputIterator<
AgentSpmvPolicyT::ROW_OFFSETS_SEARCH_LOAD_MODIFIER,
OffsetT,
OffsetT>
RowOffsetsSearchIteratorT;
typedef CacheModifiedInputIterator<
AgentSpmvPolicyT::ROW_OFFSETS_LOAD_MODIFIER,
OffsetT,
OffsetT>
RowOffsetsIteratorT;
typedef CacheModifiedInputIterator<
AgentSpmvPolicyT::COLUMN_INDICES_LOAD_MODIFIER,
OffsetT,
OffsetT>
ColumnIndicesIteratorT;
typedef CacheModifiedInputIterator<
AgentSpmvPolicyT::VALUES_LOAD_MODIFIER,
ValueT,
OffsetT>
ValueIteratorT;
typedef CacheModifiedInputIterator<
AgentSpmvPolicyT::VECTOR_VALUES_LOAD_MODIFIER,
ValueT,
OffsetT>
VectorValueIteratorT;
// Tuple type for scanning (pairs accumulated segment-value with segment-index)
typedef KeyValuePair<OffsetT, ValueT> KeyValuePairT;
// Reduce-value-by-segment scan operator
typedef ReduceByKeyOp<typename SemiringT::SumOp> ReduceBySegmentOpT;
// BlockReduce specialization
typedef BlockReduce<
ValueT,
BLOCK_THREADS,
BLOCK_REDUCE_WARP_REDUCTIONS>
BlockReduceT;
// BlockScan specialization
typedef BlockScan<
KeyValuePairT,
BLOCK_THREADS,
AgentSpmvPolicyT::SCAN_ALGORITHM>
BlockScanT;
// BlockScan specialization
typedef BlockScan<
ValueT,
BLOCK_THREADS,
AgentSpmvPolicyT::SCAN_ALGORITHM>
BlockPrefixSumT;
// BlockExchange specialization
typedef BlockExchange<
ValueT,
BLOCK_THREADS,
ITEMS_PER_THREAD>
BlockExchangeT;
/// Merge item type (either a non-zero value or a row-end offset)
union MergeItem
{
// Value type to pair with index type OffsetT (NullType if loading values directly during merge)
typedef typename If<AgentSpmvPolicyT::DIRECT_LOAD_NONZEROS, NullType, ValueT>::Type MergeValueT;
OffsetT row_end_offset;
MergeValueT nonzero;
};
/// Shared memory type required by this thread block
struct _TempStorage
{
CoordinateT tile_coords[2];
union Aliasable
{
// Smem needed for tile of merge items
MergeItem merge_items[ITEMS_PER_THREAD + TILE_ITEMS + 1];
// Smem needed for block exchange
typename BlockExchangeT::TempStorage exchange;
// Smem needed for block-wide reduction
typename BlockReduceT::TempStorage reduce;
// Smem needed for tile scanning
typename BlockScanT::TempStorage scan;
// Smem needed for tile prefix sum
typename BlockPrefixSumT::TempStorage prefix_sum;
} aliasable;
};
/// Temporary storage type (unionable)
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
_TempStorage& temp_storage; /// Reference to temp_storage
SpmvParams<ValueT, OffsetT>& spmv_params;
ValueIteratorT wd_values; ///< Wrapped pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix <b>A</b>.
    RowOffsetsIteratorT wd_row_end_offsets; ///< Wrapped pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values
    ColumnIndicesIteratorT wd_column_indices; ///< Wrapped pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix <b>A</b>. (Indices are zero-based.)
    VectorValueIteratorT wd_vector_x; ///< Wrapped pointer to the array of \p num_cols values corresponding to the dense input vector <em>x</em>
    VectorValueIteratorT wd_vector_y; ///< Wrapped pointer to the array of \p num_rows values corresponding to the dense output vector <em>y</em>
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ AgentSpmv(
TempStorage& temp_storage, ///< Reference to temp_storage
SpmvParams<ValueT, OffsetT>& spmv_params) ///< SpMV input parameter bundle
:
temp_storage(temp_storage.Alias()),
spmv_params(spmv_params),
wd_values(spmv_params.d_values),
wd_row_end_offsets(spmv_params.d_row_end_offsets),
wd_column_indices(spmv_params.d_column_indices),
wd_vector_x(spmv_params.d_vector_x),
wd_vector_y(spmv_params.d_vector_y)
{}
/**
* Consume a merge tile, specialized for direct-load of nonzeros
*/
__device__ __forceinline__ KeyValuePairT ConsumeTile(
int tile_idx,
CoordinateT tile_start_coord,
CoordinateT tile_end_coord,
Int2Type<true> is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch
{
int tile_num_rows = tile_end_coord.x - tile_start_coord.x;
int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y;
OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset;
// Gather the row end-offsets for the merge tile into shared memory
for (int item = threadIdx.x; item <= tile_num_rows; item += BLOCK_THREADS)
{
s_tile_row_end_offsets[item] = wd_row_end_offsets[tile_start_coord.x + item];
}
CTA_SYNC();
// Search for the thread's starting coordinate within the merge tile
CountingInputIterator<OffsetT> tile_nonzero_indices(tile_start_coord.y);
CoordinateT thread_start_coord;
MergePathSearch(
OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal
s_tile_row_end_offsets, // List A
tile_nonzero_indices, // List B
tile_num_rows,
tile_num_nonzeros,
thread_start_coord);
CTA_SYNC(); // Perf-sync
// Compute the thread's merge path segment
CoordinateT thread_current_coord = thread_start_coord;
KeyValuePairT scan_segment[ITEMS_PER_THREAD];
ValueT running_total = SemiringT::plus_ident();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
OffsetT nonzero_idx = CUB_MIN(tile_nonzero_indices[thread_current_coord.y], spmv_params.num_nonzeros - 1);
OffsetT column_idx = wd_column_indices[nonzero_idx];
ValueT value = wd_values[nonzero_idx];
// #if (CUB_PTX_ARCH >= 350)
// ValueT vector_value = wd_vector_x[column_idx];
// #else
// ValueT vector_value = spmv_params.t_vector_x[column_idx];
// #endif
ValueT vector_value = spmv_params.t_vector_x[column_idx];
#if (CUB_PTX_ARCH >= 350)
vector_value = wd_vector_x[column_idx];
#endif
ValueT nonzero = SemiringT::times(value, vector_value);
OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x];
if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset)
{
// Move down (accumulate)
running_total = SemiringT::plus(nonzero, running_total);
scan_segment[ITEM].value = running_total;
scan_segment[ITEM].key = tile_num_rows;
++thread_current_coord.y;
}
else
{
// Move right (reset)
scan_segment[ITEM].value = running_total;
scan_segment[ITEM].key = thread_current_coord.x;
running_total = SemiringT::plus_ident();
++thread_current_coord.x;
}
}
CTA_SYNC();
// Block-wide reduce-value-by-segment
KeyValuePairT tile_carry;
ReduceBySegmentOpT scan_op;
KeyValuePairT scan_item;
scan_item.value = running_total;
scan_item.key = thread_current_coord.x;
BlockScanT(temp_storage.aliasable.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry);
if (tile_num_rows > 0)
{
if (threadIdx.x == 0)
scan_item.key = -1;
// Direct scatter
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
if (scan_segment[ITEM].key < tile_num_rows)
{
if (scan_item.key == scan_segment[ITEM].key)
scan_segment[ITEM].value = SemiringT::plus(scan_item.value, scan_segment[ITEM].value);
if (HAS_ALPHA)
{
scan_segment[ITEM].value = SemiringT::times(scan_segment[ITEM].value, spmv_params.alpha);
}
if (HAS_BETA)
{
// Update the output vector element
ValueT addend = SemiringT::times(spmv_params.beta, wd_vector_y[tile_start_coord.x + scan_segment[ITEM].key]);
scan_segment[ITEM].value = SemiringT::plus(addend, scan_segment[ITEM].value);
}
// Set the output vector element
spmv_params.d_vector_y[tile_start_coord.x + scan_segment[ITEM].key] = scan_segment[ITEM].value;
}
}
}
// Return the tile's running carry-out
return tile_carry;
}
/**
* Consume a merge tile, specialized for indirect load of nonzeros
*/
__device__ __forceinline__ KeyValuePairT ConsumeTile(
int tile_idx,
CoordinateT tile_start_coord,
CoordinateT tile_end_coord,
Int2Type<false> is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch
{
int tile_num_rows = tile_end_coord.x - tile_start_coord.x;
int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y;
#if (CUB_PTX_ARCH >= 520)
OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset;
ValueT* s_tile_nonzeros = &temp_storage.aliasable.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero;
// Gather the nonzeros for the merge tile into shared memory
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS);
ValueIteratorT a = wd_values + tile_start_coord.y + nonzero_idx;
ColumnIndicesIteratorT ci = wd_column_indices + tile_start_coord.y + nonzero_idx;
ValueT* s = s_tile_nonzeros + nonzero_idx;
if (nonzero_idx < tile_num_nonzeros)
{
OffsetT column_idx = *ci;
ValueT value = *a;
ValueT vector_value = spmv_params.t_vector_x[column_idx];
vector_value = wd_vector_x[column_idx];
ValueT nonzero = SemiringT::times(value, vector_value);
*s = nonzero;
}
}
#else
OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset;
ValueT* s_tile_nonzeros = &temp_storage.aliasable.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero;
// Gather the nonzeros for the merge tile into shared memory
if (tile_num_nonzeros > 0)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS);
nonzero_idx = CUB_MIN(nonzero_idx, tile_num_nonzeros - 1);
OffsetT column_idx = wd_column_indices[tile_start_coord.y + nonzero_idx];
ValueT value = wd_values[tile_start_coord.y + nonzero_idx];
// #if (CUB_PTX_ARCH >= 350)
// ValueT vector_value = wd_vector_x[column_idx];
// #else
// ValueT vector_value = spmv_params.t_vector_x[column_idx];
// #endif
ValueT vector_value = spmv_params.t_vector_x[column_idx];
#if (CUB_PTX_ARCH >= 350)
vector_value = wd_vector_x[column_idx];
#endif
ValueT nonzero = SemiringT::times(value, vector_value);
s_tile_nonzeros[nonzero_idx] = nonzero;
}
}
#endif
// Gather the row end-offsets for the merge tile into shared memory
#pragma unroll 1
for (int item = threadIdx.x; item <= tile_num_rows; item += BLOCK_THREADS)
{
s_tile_row_end_offsets[item] = wd_row_end_offsets[tile_start_coord.x + item];
}
CTA_SYNC();
// Search for the thread's starting coordinate within the merge tile
CountingInputIterator<OffsetT> tile_nonzero_indices(tile_start_coord.y);
CoordinateT thread_start_coord;
MergePathSearch(
OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal
s_tile_row_end_offsets, // List A
tile_nonzero_indices, // List B
tile_num_rows,
tile_num_nonzeros,
thread_start_coord);
CTA_SYNC(); // Perf-sync
// Compute the thread's merge path segment
CoordinateT thread_current_coord = thread_start_coord;
KeyValuePairT scan_segment[ITEMS_PER_THREAD];
ValueT running_total = SemiringT::plus_ident();
OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x];
ValueT nonzero = s_tile_nonzeros[thread_current_coord.y];
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset)
{
// Move down (accumulate)
scan_segment[ITEM].value = nonzero;
running_total = SemiringT::plus(nonzero, running_total);
++thread_current_coord.y;
nonzero = s_tile_nonzeros[thread_current_coord.y];
}
else
{
// Move right (reset)
scan_segment[ITEM].value = SemiringT::plus_ident();
running_total = SemiringT::plus_ident();
++thread_current_coord.x;
row_end_offset = s_tile_row_end_offsets[thread_current_coord.x];
}
scan_segment[ITEM].key = thread_current_coord.x;
}
CTA_SYNC();
// Block-wide reduce-value-by-segment
KeyValuePairT tile_carry;
ReduceBySegmentOpT scan_op;
KeyValuePairT scan_item;
scan_item.value = running_total;
scan_item.key = thread_current_coord.x;
BlockScanT(temp_storage.aliasable.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry);
if (threadIdx.x == 0)
{
scan_item.key = thread_start_coord.x;
scan_item.value = SemiringT::plus_ident();
}
if (tile_num_rows > 0)
{
CTA_SYNC();
// Scan downsweep and scatter
ValueT* s_partials = &temp_storage.aliasable.merge_items[0].nonzero;
if (scan_item.key != scan_segment[0].key)
{
s_partials[scan_item.key] = scan_item.value;
}
else
{
scan_segment[0].value = SemiringT::plus(scan_item.value, scan_segment[0].value);
}
#pragma unroll
for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
if (scan_segment[ITEM - 1].key != scan_segment[ITEM].key)
{
s_partials[scan_segment[ITEM - 1].key] = scan_segment[ITEM - 1].value;
}
else
{
scan_segment[ITEM].value = SemiringT::plus(scan_segment[ITEM].value, scan_segment[ITEM - 1].value);
}
}
CTA_SYNC();
#pragma unroll 1
for (int item = threadIdx.x; item < tile_num_rows; item += BLOCK_THREADS)
{
if (HAS_ALPHA)
{
s_partials[item] = SemiringT::times(s_partials[item], spmv_params.alpha);
}
if (HAS_BETA)
{
// Update the output vector element
ValueT addend = SemiringT::times(spmv_params.beta, spmv_params.d_vector_y[tile_start_coord.x + item]);
s_partials[item] = SemiringT::plus(addend, s_partials[item]);
}
spmv_params.d_vector_y[tile_start_coord.x + item] = s_partials[item];
}
}
// Return the tile's running carry-out
return tile_carry;
}
/**
* Consume input tile
*/
__device__ __forceinline__ void ConsumeTile(
CoordinateT* d_tile_coordinates, ///< [in] Pointer to the temporary array of tile starting coordinates
KeyValuePairT* d_tile_carry_pairs, ///< [out] Pointer to the temporary array carry-out dot product row-ids, one per block
int num_merge_tiles) ///< [in] Number of merge tiles
{
int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index
if (tile_idx >= num_merge_tiles)
return;
// Read our starting coordinates
if (threadIdx.x < 2)
{
if (d_tile_coordinates == NULL)
{
// Search our starting coordinates
OffsetT diagonal = (tile_idx + threadIdx.x) * TILE_ITEMS;
CoordinateT tile_coord;
CountingInputIterator<OffsetT> nonzero_indices(0);
// Search the merge path
MergePathSearch(
diagonal,
RowOffsetsSearchIteratorT(spmv_params.d_row_end_offsets),
nonzero_indices,
spmv_params.num_rows,
spmv_params.num_nonzeros,
tile_coord);
temp_storage.tile_coords[threadIdx.x] = tile_coord;
}
else
{
temp_storage.tile_coords[threadIdx.x] = d_tile_coordinates[tile_idx + threadIdx.x];
}
}
CTA_SYNC();
CoordinateT tile_start_coord = temp_storage.tile_coords[0];
CoordinateT tile_end_coord = temp_storage.tile_coords[1];
// Consume multi-segment tile
KeyValuePairT tile_carry = ConsumeTile(
tile_idx,
tile_start_coord,
tile_end_coord,
Int2Type<AgentSpmvPolicyT::DIRECT_LOAD_NONZEROS>());
// Output the tile's carry-out
if (threadIdx.x == 0)
{
if (HAS_ALPHA)
tile_carry.value = SemiringT::times(spmv_params.alpha, tile_carry.value);
tile_carry.key += tile_start_coord.x;
d_tile_carry_pairs[tile_idx] = tile_carry;
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/single_pass_scan_operators.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Callback operator types for supplying BlockScan prefixes
*/
#pragma once
#include <iterator>
#include "../thread/thread_load.cuh"
#include "../thread/thread_store.cuh"
#include "../warp/warp_reduce.cuh"
#include "../util_arch.cuh"
#include "../util_device.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Prefix functor type for maintaining a running prefix while scanning a
* region independent of other thread blocks
******************************************************************************/
/**
* Stateful callback operator type for supplying BlockScan prefixes.
* Maintains a running prefix that can be applied to consecutive
* BlockScan operations.
*/
template <
typename T, ///< BlockScan value type
typename ScanOpT> ///< Wrapped scan operator type
struct BlockScanRunningPrefixOp
{
ScanOpT op; ///< Wrapped scan operator
T running_total; ///< Running block-wide prefix
/// Constructor
__device__ __forceinline__ BlockScanRunningPrefixOp(ScanOpT op)
:
op(op)
{}
/// Constructor
__device__ __forceinline__ BlockScanRunningPrefixOp(
T starting_prefix,
ScanOpT op)
:
op(op),
running_total(starting_prefix)
{}
/**
* Prefix callback operator. Returns the block-wide running_total in thread-0.
*/
__device__ __forceinline__ T operator()(
const T &block_aggregate) ///< The aggregate sum of the BlockScan inputs
{
T retval = running_total;
running_total = op(running_total, block_aggregate);
return retval;
}
};
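// Illustrative device-side usage (a minimal sketch; kernel boilerplate is omitted
// and the names d_in, d_out, and num_tiles are assumptions):
//
//     typedef BlockScan<int, 128> BlockScanT;
//     __shared__ typename BlockScanT::TempStorage temp_storage;
//
//     // Running prefix, carried across the consecutive tiles this block processes
//     BlockScanRunningPrefixOp<int, Sum> prefix_op(0, Sum());
//
//     for (int tile = 0; tile < num_tiles; ++tile)
//     {
//         int item = d_in[(tile * 128) + threadIdx.x];
//         BlockScanT(temp_storage).ExclusiveScan(item, item, Sum(), prefix_op);
//         CTA_SYNC();
//         d_out[(tile * 128) + threadIdx.x] = item;
//     }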
/******************************************************************************
* Generic tile status interface types for block-cooperative scans
******************************************************************************/
/**
* Enumerations of tile status
*/
enum ScanTileStatus
{
SCAN_TILE_OOB, // Out-of-bounds (e.g., padding)
SCAN_TILE_INVALID = 99, // Not yet processed
SCAN_TILE_PARTIAL, // Tile aggregate is available
SCAN_TILE_INCLUSIVE, // Inclusive tile prefix is available
};
/**
* Tile status interface.
*/
template <
typename T,
bool SINGLE_WORD = Traits<T>::PRIMITIVE>
struct ScanTileState;
/**
* Tile status interface specialized for scan status and value types
* that can be combined into one machine word that can be
* read/written coherently in a single access.
*/
template <typename T>
struct ScanTileState<T, true>
{
// Status word type
typedef typename If<(sizeof(T) == 8),
long long,
typename If<(sizeof(T) == 4),
int,
typename If<(sizeof(T) == 2),
short,
char>::Type>::Type>::Type StatusWord;
// Unit word type
typedef typename If<(sizeof(T) == 8),
longlong2,
typename If<(sizeof(T) == 4),
int2,
typename If<(sizeof(T) == 2),
int,
uchar2>::Type>::Type>::Type TxnWord;
// Device word type
struct TileDescriptor
{
StatusWord status;
T value;
};
// Constants
enum
{
TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS,
};
// Device storage
TxnWord *d_tile_descriptors;
/// Constructor
__host__ __device__ __forceinline__
ScanTileState()
:
d_tile_descriptors(NULL)
{}
/// Initializer
__host__ __device__ __forceinline__
cudaError_t Init(
int /*num_tiles*/, ///< [in] Number of tiles
void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
        size_t /*temp_storage_bytes*/) ///< [in] Size in bytes of \p d_temp_storage allocation
{
d_tile_descriptors = reinterpret_cast<TxnWord*>(d_temp_storage);
return cudaSuccess;
}
/**
* Compute device memory needed for tile status
*/
__host__ __device__ __forceinline__
static cudaError_t AllocationSize(
int num_tiles, ///< [in] Number of tiles
        size_t &temp_storage_bytes) ///< [out] Size in bytes of \p d_temp_storage allocation
{
temp_storage_bytes = (num_tiles + TILE_STATUS_PADDING) * sizeof(TileDescriptor); // bytes needed for tile status descriptors
return cudaSuccess;
}
/**
* Initialize (from device)
*/
__device__ __forceinline__ void InitializeStatus(int num_tiles)
{
int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
TxnWord val = TxnWord();
TileDescriptor *descriptor = reinterpret_cast<TileDescriptor*>(&val);
if (tile_idx < num_tiles)
{
// Not-yet-set
descriptor->status = StatusWord(SCAN_TILE_INVALID);
d_tile_descriptors[TILE_STATUS_PADDING + tile_idx] = val;
}
if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING))
{
// Padding
descriptor->status = StatusWord(SCAN_TILE_OOB);
d_tile_descriptors[threadIdx.x] = val;
}
}
/**
* Update the specified tile's inclusive value and corresponding status
*/
__device__ __forceinline__ void SetInclusive(int tile_idx, T tile_inclusive)
{
TileDescriptor tile_descriptor;
tile_descriptor.status = SCAN_TILE_INCLUSIVE;
tile_descriptor.value = tile_inclusive;
TxnWord alias;
*reinterpret_cast<TileDescriptor*>(&alias) = tile_descriptor;
ThreadStore<STORE_CG>(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx, alias);
}
/**
* Update the specified tile's partial value and corresponding status
*/
__device__ __forceinline__ void SetPartial(int tile_idx, T tile_partial)
{
TileDescriptor tile_descriptor;
tile_descriptor.status = SCAN_TILE_PARTIAL;
tile_descriptor.value = tile_partial;
TxnWord alias;
*reinterpret_cast<TileDescriptor*>(&alias) = tile_descriptor;
ThreadStore<STORE_CG>(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx, alias);
}
/**
* Wait for the corresponding tile to become non-invalid
*/
__device__ __forceinline__ void WaitForValid(
int tile_idx,
StatusWord &status,
T &value)
{
TileDescriptor tile_descriptor;
do
{
__threadfence_block(); // prevent hoisting loads from loop
TxnWord alias = ThreadLoad<LOAD_CG>(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx);
tile_descriptor = reinterpret_cast<TileDescriptor&>(alias);
} while (WARP_ANY((tile_descriptor.status == SCAN_TILE_INVALID), 0xffffffff));
status = tile_descriptor.status;
value = tile_descriptor.value;
}
};
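// Illustrative two-phase allocation (a sketch of how dispatch layers typically
// drive this interface; allocation of d_temp_storage itself is assumed):
//
//     size_t temp_storage_bytes = 0;
//     ScanTileState<int>::AllocationSize(num_tiles, temp_storage_bytes);
//     // ... allocate d_temp_storage with at least temp_storage_bytes bytes ...
//     ScanTileState<int> tile_state;
//     tile_state.Init(num_tiles, d_temp_storage, temp_storage_bytes);
//     // A separate grid launch then calls tile_state.InitializeStatus(num_tiles)
//     // before the scan kernel begins consuming tiles.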
/**
* Tile status interface specialized for scan status and value types that
* cannot be combined into one machine word.
*/
template <typename T>
struct ScanTileState<T, false>
{
// Status word type
typedef char StatusWord;
// Constants
enum
{
TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS,
};
// Device storage
StatusWord *d_tile_status;
T *d_tile_partial;
T *d_tile_inclusive;
/// Constructor
__host__ __device__ __forceinline__
ScanTileState()
:
d_tile_status(NULL),
d_tile_partial(NULL),
d_tile_inclusive(NULL)
{}
/// Initializer
__host__ __device__ __forceinline__
cudaError_t Init(
int num_tiles, ///< [in] Number of tiles
void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
        size_t temp_storage_bytes) ///< [in] Size in bytes of \p d_temp_storage allocation
{
cudaError_t error = cudaSuccess;
do
{
void* allocations[3] = {NULL, NULL, NULL};
size_t allocation_sizes[3];
allocation_sizes[0] = (num_tiles + TILE_STATUS_PADDING) * sizeof(StatusWord); // bytes needed for tile status descriptors
allocation_sizes[1] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized<T>); // bytes needed for partials
allocation_sizes[2] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized<T>); // bytes needed for inclusives
// Compute allocation pointers into the single storage blob
if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
// Alias the offsets
d_tile_status = reinterpret_cast<StatusWord*>(allocations[0]);
d_tile_partial = reinterpret_cast<T*>(allocations[1]);
d_tile_inclusive = reinterpret_cast<T*>(allocations[2]);
}
while (0);
return error;
}
/**
* Compute device memory needed for tile status
*/
__host__ __device__ __forceinline__
static cudaError_t AllocationSize(
int num_tiles, ///< [in] Number of tiles
        size_t &temp_storage_bytes) ///< [out] Size in bytes of \p d_temp_storage allocation
{
// Specify storage allocation requirements
size_t allocation_sizes[3];
allocation_sizes[0] = (num_tiles + TILE_STATUS_PADDING) * sizeof(StatusWord); // bytes needed for tile status descriptors
allocation_sizes[1] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized<T>); // bytes needed for partials
allocation_sizes[2] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized<T>); // bytes needed for inclusives
// Set the necessary size of the blob
void* allocations[3];
return CubDebug(AliasTemporaries(NULL, temp_storage_bytes, allocations, allocation_sizes));
}
/**
* Initialize (from device)
*/
__device__ __forceinline__ void InitializeStatus(int num_tiles)
{
int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tile_idx < num_tiles)
{
// Not-yet-set
d_tile_status[TILE_STATUS_PADDING + tile_idx] = StatusWord(SCAN_TILE_INVALID);
}
if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING))
{
// Padding
d_tile_status[threadIdx.x] = StatusWord(SCAN_TILE_OOB);
}
}
/**
* Update the specified tile's inclusive value and corresponding status
*/
__device__ __forceinline__ void SetInclusive(int tile_idx, T tile_inclusive)
{
// Update tile inclusive value
ThreadStore<STORE_CG>(d_tile_inclusive + TILE_STATUS_PADDING + tile_idx, tile_inclusive);
// Fence
__threadfence();
// Update tile status
ThreadStore<STORE_CG>(d_tile_status + TILE_STATUS_PADDING + tile_idx, StatusWord(SCAN_TILE_INCLUSIVE));
}
/**
* Update the specified tile's partial value and corresponding status
*/
__device__ __forceinline__ void SetPartial(int tile_idx, T tile_partial)
{
// Update tile partial value
ThreadStore<STORE_CG>(d_tile_partial + TILE_STATUS_PADDING + tile_idx, tile_partial);
// Fence
__threadfence();
// Update tile status
ThreadStore<STORE_CG>(d_tile_status + TILE_STATUS_PADDING + tile_idx, StatusWord(SCAN_TILE_PARTIAL));
}
/**
* Wait for the corresponding tile to become non-invalid
*/
__device__ __forceinline__ void WaitForValid(
int tile_idx,
StatusWord &status,
T &value)
{
do {
status = ThreadLoad<LOAD_CG>(d_tile_status + TILE_STATUS_PADDING + tile_idx);
__threadfence(); // prevent hoisting loads from loop or loads below above this one
} while (status == SCAN_TILE_INVALID);
if (status == StatusWord(SCAN_TILE_PARTIAL))
value = ThreadLoad<LOAD_CG>(d_tile_partial + TILE_STATUS_PADDING + tile_idx);
else
value = ThreadLoad<LOAD_CG>(d_tile_inclusive + TILE_STATUS_PADDING + tile_idx);
}
};
/******************************************************************************
* ReduceByKey tile status interface types for block-cooperative scans
******************************************************************************/
/**
* Tile status interface for reduction by key.
*
*/
template <
typename ValueT,
typename KeyT,
bool SINGLE_WORD = (Traits<ValueT>::PRIMITIVE) && (sizeof(ValueT) + sizeof(KeyT) < 16)>
struct ReduceByKeyScanTileState;
/**
* Tile status interface for reduction by key, specialized for scan status and value types that
* cannot be combined into one machine word.
*/
template <
typename ValueT,
typename KeyT>
struct ReduceByKeyScanTileState<ValueT, KeyT, false> :
ScanTileState<KeyValuePair<KeyT, ValueT> >
{
typedef ScanTileState<KeyValuePair<KeyT, ValueT> > SuperClass;
/// Constructor
__host__ __device__ __forceinline__
ReduceByKeyScanTileState() : SuperClass() {}
};
/**
* Tile status interface for reduction by key, specialized for scan status and value types that
* can be combined into one machine word that can be read/written coherently in a single access.
*/
template <
typename ValueT,
typename KeyT>
struct ReduceByKeyScanTileState<ValueT, KeyT, true>
{
    typedef KeyValuePair<KeyT, ValueT> KeyValuePairT;
// Constants
enum
{
PAIR_SIZE = sizeof(ValueT) + sizeof(KeyT),
TXN_WORD_SIZE = 1 << Log2<PAIR_SIZE + 1>::VALUE,
STATUS_WORD_SIZE = TXN_WORD_SIZE - PAIR_SIZE,
TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS,
};
// Status word type
typedef typename If<(STATUS_WORD_SIZE == 8),
long long,
typename If<(STATUS_WORD_SIZE == 4),
int,
typename If<(STATUS_WORD_SIZE == 2),
short,
char>::Type>::Type>::Type StatusWord;
// Status word type
typedef typename If<(TXN_WORD_SIZE == 16),
longlong2,
typename If<(TXN_WORD_SIZE == 8),
long long,
int>::Type>::Type TxnWord;
// Device word type (for when sizeof(ValueT) == sizeof(KeyT))
struct TileDescriptorBigStatus
{
KeyT key;
ValueT value;
StatusWord status;
};
// Device word type (for when sizeof(ValueT) != sizeof(KeyT))
struct TileDescriptorLittleStatus
{
ValueT value;
StatusWord status;
KeyT key;
};
// Device word type
typedef typename If<
(sizeof(ValueT) == sizeof(KeyT)),
TileDescriptorBigStatus,
TileDescriptorLittleStatus>::Type
TileDescriptor;
// Device storage
TxnWord *d_tile_descriptors;
/// Constructor
__host__ __device__ __forceinline__
ReduceByKeyScanTileState()
:
d_tile_descriptors(NULL)
{}
/// Initializer
__host__ __device__ __forceinline__
cudaError_t Init(
int /*num_tiles*/, ///< [in] Number of tiles
void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
        size_t /*temp_storage_bytes*/) ///< [in] Size in bytes of \p d_temp_storage allocation
{
d_tile_descriptors = reinterpret_cast<TxnWord*>(d_temp_storage);
return cudaSuccess;
}
/**
* Compute device memory needed for tile status
*/
__host__ __device__ __forceinline__
static cudaError_t AllocationSize(
int num_tiles, ///< [in] Number of tiles
        size_t &temp_storage_bytes) ///< [out] Size in bytes of \p d_temp_storage allocation
{
temp_storage_bytes = (num_tiles + TILE_STATUS_PADDING) * sizeof(TileDescriptor); // bytes needed for tile status descriptors
return cudaSuccess;
}
/**
* Initialize (from device)
*/
__device__ __forceinline__ void InitializeStatus(int num_tiles)
{
int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
TxnWord val = TxnWord();
TileDescriptor *descriptor = reinterpret_cast<TileDescriptor*>(&val);
if (tile_idx < num_tiles)
{
// Not-yet-set
descriptor->status = StatusWord(SCAN_TILE_INVALID);
d_tile_descriptors[TILE_STATUS_PADDING + tile_idx] = val;
}
if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING))
{
// Padding
descriptor->status = StatusWord(SCAN_TILE_OOB);
d_tile_descriptors[threadIdx.x] = val;
}
}
/**
* Update the specified tile's inclusive value and corresponding status
*/
__device__ __forceinline__ void SetInclusive(int tile_idx, KeyValuePairT tile_inclusive)
{
TileDescriptor tile_descriptor;
tile_descriptor.status = SCAN_TILE_INCLUSIVE;
tile_descriptor.value = tile_inclusive.value;
tile_descriptor.key = tile_inclusive.key;
TxnWord alias;
*reinterpret_cast<TileDescriptor*>(&alias) = tile_descriptor;
ThreadStore<STORE_CG>(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx, alias);
}
/**
* Update the specified tile's partial value and corresponding status
*/
__device__ __forceinline__ void SetPartial(int tile_idx, KeyValuePairT tile_partial)
{
TileDescriptor tile_descriptor;
tile_descriptor.status = SCAN_TILE_PARTIAL;
tile_descriptor.value = tile_partial.value;
tile_descriptor.key = tile_partial.key;
TxnWord alias;
*reinterpret_cast<TileDescriptor*>(&alias) = tile_descriptor;
ThreadStore<STORE_CG>(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx, alias);
}
/**
* Wait for the corresponding tile to become non-invalid
*/
__device__ __forceinline__ void WaitForValid(
int tile_idx,
StatusWord &status,
KeyValuePairT &value)
{
// TxnWord alias = ThreadLoad<LOAD_CG>(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx);
// TileDescriptor tile_descriptor = reinterpret_cast<TileDescriptor&>(alias);
//
// while (tile_descriptor.status == SCAN_TILE_INVALID)
// {
// __threadfence_block(); // prevent hoisting loads from loop
//
// alias = ThreadLoad<LOAD_CG>(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx);
// tile_descriptor = reinterpret_cast<TileDescriptor&>(alias);
// }
//
// status = tile_descriptor.status;
// value.value = tile_descriptor.value;
// value.key = tile_descriptor.key;
TileDescriptor tile_descriptor;
do
{
__threadfence_block(); // prevent hoisting loads from loop
TxnWord alias = ThreadLoad<LOAD_CG>(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx);
tile_descriptor = reinterpret_cast<TileDescriptor&>(alias);
} while (WARP_ANY((tile_descriptor.status == SCAN_TILE_INVALID), 0xffffffff));
status = tile_descriptor.status;
value.value = tile_descriptor.value;
value.key = tile_descriptor.key;
}
};
/******************************************************************************
* Prefix call-back operator for coupling local block scan within a
* block-cooperative scan
******************************************************************************/
/**
 * Stateful block-scan prefix functor.  Provides the running prefix for
 * the current tile by using the call-back warp to wait on
 * aggregates/prefixes from predecessor tiles to become available.
*/
template <
typename T,
typename ScanOpT,
typename ScanTileStateT,
int PTX_ARCH = CUB_PTX_ARCH>
struct TilePrefixCallbackOp
{
// Parameterized warp reduce
typedef WarpReduce<T, CUB_PTX_WARP_THREADS, PTX_ARCH> WarpReduceT;
// Temporary storage type
struct _TempStorage
{
typename WarpReduceT::TempStorage warp_reduce;
T exclusive_prefix;
T inclusive_prefix;
T block_aggregate;
};
// Alias wrapper allowing temporary storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
// Type of status word
typedef typename ScanTileStateT::StatusWord StatusWord;
// Fields
_TempStorage& temp_storage; ///< Reference to a warp-reduction instance
ScanTileStateT& tile_status; ///< Interface to tile status
ScanOpT scan_op; ///< Binary scan operator
int tile_idx; ///< The current tile index
T exclusive_prefix; ///< Exclusive prefix for the tile
T inclusive_prefix; ///< Inclusive prefix for the tile
// Constructor
__device__ __forceinline__
TilePrefixCallbackOp(
ScanTileStateT &tile_status,
TempStorage &temp_storage,
ScanOpT scan_op,
int tile_idx)
:
temp_storage(temp_storage.Alias()),
tile_status(tile_status),
scan_op(scan_op),
tile_idx(tile_idx) {}
// Block until all predecessors within the warp-wide window have non-invalid status
__device__ __forceinline__
void ProcessWindow(
int predecessor_idx, ///< Preceding tile index to inspect
StatusWord &predecessor_status, ///< [out] Preceding tile status
T &window_aggregate) ///< [out] Relevant partial reduction from this window of preceding tiles
{
T value;
tile_status.WaitForValid(predecessor_idx, predecessor_status, value);
// Perform a segmented reduction to get the prefix for the current window.
// Use the swizzled scan operator because we are now scanning *down* towards thread0.
int tail_flag = (predecessor_status == StatusWord(SCAN_TILE_INCLUSIVE));
window_aggregate = WarpReduceT(temp_storage.warp_reduce).TailSegmentedReduce(
value,
tail_flag,
SwizzleScanOp<ScanOpT>(scan_op));
}
// BlockScan prefix callback functor (called by the first warp)
__device__ __forceinline__
T operator()(T block_aggregate)
{
// Update our status with our tile-aggregate
if (threadIdx.x == 0)
{
temp_storage.block_aggregate = block_aggregate;
tile_status.SetPartial(tile_idx, block_aggregate);
}
int predecessor_idx = tile_idx - threadIdx.x - 1;
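        // (each lane in the calling warp inspects a distinct predecessor tile)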
StatusWord predecessor_status;
T window_aggregate;
// Wait for the warp-wide window of predecessor tiles to become valid
ProcessWindow(predecessor_idx, predecessor_status, window_aggregate);
// The exclusive tile prefix starts out as the current window aggregate
exclusive_prefix = window_aggregate;
// Keep sliding the window back until we come across a tile whose inclusive prefix is known
while (WARP_ALL((predecessor_status != StatusWord(SCAN_TILE_INCLUSIVE)), 0xffffffff))
{
predecessor_idx -= CUB_PTX_WARP_THREADS;
// Update exclusive tile prefix with the window prefix
ProcessWindow(predecessor_idx, predecessor_status, window_aggregate);
exclusive_prefix = scan_op(window_aggregate, exclusive_prefix);
}
// Compute the inclusive tile prefix and update the status for this tile
if (threadIdx.x == 0)
{
inclusive_prefix = scan_op(exclusive_prefix, block_aggregate);
tile_status.SetInclusive(tile_idx, inclusive_prefix);
temp_storage.exclusive_prefix = exclusive_prefix;
temp_storage.inclusive_prefix = inclusive_prefix;
}
// Return exclusive_prefix
return exclusive_prefix;
}
// Get the exclusive prefix stored in temporary storage
__device__ __forceinline__
T GetExclusivePrefix()
{
return temp_storage.exclusive_prefix;
}
// Get the inclusive prefix stored in temporary storage
__device__ __forceinline__
T GetInclusivePrefix()
{
return temp_storage.inclusive_prefix;
}
// Get the block aggregate stored in temporary storage
__device__ __forceinline__
T GetBlockAggregate()
{
return temp_storage.block_aggregate;
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_rle.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::AgentRle implements a stateful abstraction of CUDA thread blocks for participating in device-wide run-length-encode.
*/
#pragma once
#include <iterator>
#include "single_pass_scan_operators.cuh"
#include "../block/block_load.cuh"
#include "../block/block_store.cuh"
#include "../block/block_scan.cuh"
#include "../block/block_exchange.cuh"
#include "../block/block_discontinuity.cuh"
#include "../grid/grid_queue.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../iterator/constant_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Parameterizable tuning policy type for AgentRle
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use
CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements
bool _STORE_WARP_TIME_SLICING, ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage)
BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use
struct AgentRlePolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
STORE_WARP_TIME_SLICING = _STORE_WARP_TIME_SLICING, ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage)
};
static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements
static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use
};
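// Illustrative instantiation (hypothetical values; a sketch, not a tuned policy):
//
//   typedef AgentRlePolicy<
//       96, 15,
//       BLOCK_LOAD_DIRECT, LOAD_LDG,
//       true, BLOCK_SCAN_WARP_SCANS>
//     ExampleRlePolicy;    // 96 threads/block, 15 items/thread, time-sliced stores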
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief AgentRle implements a stateful abstraction of CUDA thread blocks for participating in device-wide run-length-encode
*/
template <
typename AgentRlePolicyT, ///< Parameterized AgentRlePolicyT tuning policy type
typename InputIteratorT, ///< Random-access input iterator type for data
typename OffsetsOutputIteratorT, ///< Random-access output iterator type for offset values
typename LengthsOutputIteratorT, ///< Random-access output iterator type for length values
typename EqualityOpT, ///< T equality operator type
typename OffsetT> ///< Signed integer type for global offsets
struct AgentRle
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
/// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type T;
/// The lengths output value type
typedef typename If<(Equals<typename std::iterator_traits<LengthsOutputIteratorT>::value_type, void>::VALUE), // LengthT = (if output iterator's value type is void) ?
OffsetT, // ... then the OffsetT type,
typename std::iterator_traits<LengthsOutputIteratorT>::value_type>::Type LengthT; // ... else the output iterator's value type
/// Tuple type for scanning (pairs run-length and run-index)
typedef KeyValuePair<OffsetT, LengthT> LengthOffsetPair;
/// Tile status descriptor interface type
typedef ReduceByKeyScanTileState<LengthT, OffsetT> ScanTileStateT;
// Constants
enum
{
WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH),
BLOCK_THREADS = AgentRlePolicyT::BLOCK_THREADS,
ITEMS_PER_THREAD = AgentRlePolicyT::ITEMS_PER_THREAD,
WARP_ITEMS = WARP_THREADS * ITEMS_PER_THREAD,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,
/// Whether or not to sync after loading data
SYNC_AFTER_LOAD = (AgentRlePolicyT::LOAD_ALGORITHM != BLOCK_LOAD_DIRECT),
/// Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage)
STORE_WARP_TIME_SLICING = AgentRlePolicyT::STORE_WARP_TIME_SLICING,
ACTIVE_EXCHANGE_WARPS = (STORE_WARP_TIME_SLICING) ? 1 : WARPS,
};
/**
 * Special operator that signals that all out-of-bounds items compare as unequal
 * to everything else, forcing both (1) the last in-bounds item to be tail-flagged
 * and (2) all out-of-bounds items to be marked as trivial runs.
*/
template <bool LAST_TILE>
struct OobInequalityOp
{
OffsetT num_remaining;
EqualityOpT equality_op;
__device__ __forceinline__ OobInequalityOp(
OffsetT num_remaining,
EqualityOpT equality_op)
:
num_remaining(num_remaining),
equality_op(equality_op)
{}
template <typename Index>
__host__ __device__ __forceinline__ bool operator()(T first, T second, Index idx)
{
if (!LAST_TILE || (idx < num_remaining))
return !equality_op(first, second);
else
return true;
}
};
// Cache-modified Input iterator wrapper type (for applying cache modifier) for data
typedef typename If<IsPointer<InputIteratorT>::VALUE,
        CacheModifiedInputIterator<AgentRlePolicyT::LOAD_MODIFIER, T, OffsetT>,    // Wrap the native input pointer with CacheModifiedInputIterator
InputIteratorT>::Type // Directly use the supplied input iterator type
WrappedInputIteratorT;
// Parameterized BlockLoad type for data
typedef BlockLoad<
T,
AgentRlePolicyT::BLOCK_THREADS,
AgentRlePolicyT::ITEMS_PER_THREAD,
AgentRlePolicyT::LOAD_ALGORITHM>
BlockLoadT;
// Parameterized BlockDiscontinuity type for data
typedef BlockDiscontinuity<T, BLOCK_THREADS> BlockDiscontinuityT;
// Parameterized WarpScan type
typedef WarpScan<LengthOffsetPair> WarpScanPairs;
// Reduce-length-by-run scan operator
typedef ReduceBySegmentOp<cub::Sum> ReduceBySegmentOpT;
// Callback type for obtaining tile prefix during block scan
typedef TilePrefixCallbackOp<
LengthOffsetPair,
ReduceBySegmentOpT,
ScanTileStateT>
TilePrefixCallbackOpT;
// Warp exchange types
typedef WarpExchange<LengthOffsetPair, ITEMS_PER_THREAD> WarpExchangePairs;
typedef typename If<STORE_WARP_TIME_SLICING, typename WarpExchangePairs::TempStorage, NullType>::Type WarpExchangePairsStorage;
typedef WarpExchange<OffsetT, ITEMS_PER_THREAD> WarpExchangeOffsets;
typedef WarpExchange<LengthT, ITEMS_PER_THREAD> WarpExchangeLengths;
typedef LengthOffsetPair WarpAggregates[WARPS];
// Shared memory type for this thread block
struct _TempStorage
{
// Aliasable storage layout
union Aliasable
{
struct
{
typename BlockDiscontinuityT::TempStorage discontinuity; // Smem needed for discontinuity detection
typename WarpScanPairs::TempStorage warp_scan[WARPS]; // Smem needed for warp-synchronous scans
Uninitialized<LengthOffsetPair[WARPS]> warp_aggregates; // Smem needed for sharing warp-wide aggregates
typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback
};
// Smem needed for input loading
typename BlockLoadT::TempStorage load;
// Aliasable layout needed for two-phase scatter
union ScatterAliasable
{
unsigned long long align;
WarpExchangePairsStorage exchange_pairs[ACTIVE_EXCHANGE_WARPS];
typename WarpExchangeOffsets::TempStorage exchange_offsets[ACTIVE_EXCHANGE_WARPS];
typename WarpExchangeLengths::TempStorage exchange_lengths[ACTIVE_EXCHANGE_WARPS];
} scatter_aliasable;
} aliasable;
OffsetT tile_idx; // Shared tile index
LengthOffsetPair tile_inclusive; // Inclusive tile prefix
LengthOffsetPair tile_exclusive; // Exclusive tile prefix
};
// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
_TempStorage& temp_storage; ///< Reference to temp_storage
WrappedInputIteratorT d_in; ///< Pointer to input sequence of data items
    OffsetsOutputIteratorT d_offsets_out; ///< Output run offsets
LengthsOutputIteratorT d_lengths_out; ///< Output run lengths
EqualityOpT equality_op; ///< T equality operator
ReduceBySegmentOpT scan_op; ///< Reduce-length-by-flag scan operator
OffsetT num_items; ///< Total number of input items
//---------------------------------------------------------------------
// Constructor
//---------------------------------------------------------------------
// Constructor
__device__ __forceinline__
AgentRle(
TempStorage &temp_storage, ///< [in] Reference to temp_storage
InputIteratorT d_in, ///< [in] Pointer to input sequence of data items
OffsetsOutputIteratorT d_offsets_out, ///< [out] Pointer to output sequence of run offsets
LengthsOutputIteratorT d_lengths_out, ///< [out] Pointer to output sequence of run lengths
EqualityOpT equality_op, ///< [in] T equality operator
OffsetT num_items) ///< [in] Total number of input items
:
temp_storage(temp_storage.Alias()),
d_in(d_in),
d_offsets_out(d_offsets_out),
d_lengths_out(d_lengths_out),
equality_op(equality_op),
scan_op(cub::Sum()),
num_items(num_items)
{}
//---------------------------------------------------------------------
// Utility methods for initializing the selections
//---------------------------------------------------------------------
template <bool FIRST_TILE, bool LAST_TILE>
__device__ __forceinline__ void InitializeSelections(
OffsetT tile_offset,
OffsetT num_remaining,
T (&items)[ITEMS_PER_THREAD],
LengthOffsetPair (&lengths_and_num_runs)[ITEMS_PER_THREAD])
{
bool head_flags[ITEMS_PER_THREAD];
bool tail_flags[ITEMS_PER_THREAD];
OobInequalityOp<LAST_TILE> inequality_op(num_remaining, equality_op);
if (FIRST_TILE && LAST_TILE)
{
// First-and-last-tile always head-flags the first item and tail-flags the last item
BlockDiscontinuityT(temp_storage.aliasable.discontinuity).FlagHeadsAndTails(
head_flags, tail_flags, items, inequality_op);
}
else if (FIRST_TILE)
{
// First-tile always head-flags the first item
// Get the first item from the next tile
T tile_successor_item;
if (threadIdx.x == BLOCK_THREADS - 1)
tile_successor_item = d_in[tile_offset + TILE_ITEMS];
BlockDiscontinuityT(temp_storage.aliasable.discontinuity).FlagHeadsAndTails(
head_flags, tail_flags, tile_successor_item, items, inequality_op);
}
else if (LAST_TILE)
{
// Last-tile always flags the last item
// Get the last item from the previous tile
T tile_predecessor_item;
if (threadIdx.x == 0)
tile_predecessor_item = d_in[tile_offset - 1];
BlockDiscontinuityT(temp_storage.aliasable.discontinuity).FlagHeadsAndTails(
head_flags, tile_predecessor_item, tail_flags, items, inequality_op);
}
else
{
// Get the first item from the next tile
T tile_successor_item;
if (threadIdx.x == BLOCK_THREADS - 1)
tile_successor_item = d_in[tile_offset + TILE_ITEMS];
// Get the last item from the previous tile
T tile_predecessor_item;
if (threadIdx.x == 0)
tile_predecessor_item = d_in[tile_offset - 1];
BlockDiscontinuityT(temp_storage.aliasable.discontinuity).FlagHeadsAndTails(
head_flags, tile_predecessor_item, tail_flags, tile_successor_item, items, inequality_op);
}
// Zip counts and runs
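        // (key   = 1 iff the item begins a non-trivial run (head but not also tail);
        //  value = 1 iff the item belongs to a non-trivial run (not a singleton);
        //  a reduce-by-segment scan of these pairs yields run indices and run lengths)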
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
lengths_and_num_runs[ITEM].key = head_flags[ITEM] && (!tail_flags[ITEM]);
lengths_and_num_runs[ITEM].value = ((!head_flags[ITEM]) || (!tail_flags[ITEM]));
}
}
//---------------------------------------------------------------------
// Scan utility methods
//---------------------------------------------------------------------
/**
 * Exclusive scan of run-length/run-count allocations: performs per-warp scans of
 * the zipped (num_runs, length) pairs, then combines the shared warp aggregates
 * into tile-wide, warp-exclusive, and thread-exclusive prefixes.
*/
__device__ __forceinline__ void WarpScanAllocations(
LengthOffsetPair &tile_aggregate,
LengthOffsetPair &warp_aggregate,
LengthOffsetPair &warp_exclusive_in_tile,
LengthOffsetPair &thread_exclusive_in_warp,
LengthOffsetPair (&lengths_and_num_runs)[ITEMS_PER_THREAD])
{
// Perform warpscans
unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x / WARP_THREADS);
int lane_id = LaneId();
LengthOffsetPair identity;
identity.key = 0;
identity.value = 0;
LengthOffsetPair thread_inclusive;
LengthOffsetPair thread_aggregate = internal::ThreadReduce(lengths_and_num_runs, scan_op);
WarpScanPairs(temp_storage.aliasable.warp_scan[warp_id]).Scan(
thread_aggregate,
thread_inclusive,
thread_exclusive_in_warp,
identity,
scan_op);
// Last lane in each warp shares its warp-aggregate
if (lane_id == WARP_THREADS - 1)
temp_storage.aliasable.warp_aggregates.Alias()[warp_id] = thread_inclusive;
CTA_SYNC();
// Accumulate total selected and the warp-wide prefix
warp_exclusive_in_tile = identity;
warp_aggregate = temp_storage.aliasable.warp_aggregates.Alias()[warp_id];
tile_aggregate = temp_storage.aliasable.warp_aggregates.Alias()[0];
#pragma unroll
for (int WARP = 1; WARP < WARPS; ++WARP)
{
if (warp_id == WARP)
warp_exclusive_in_tile = tile_aggregate;
tile_aggregate = scan_op(tile_aggregate, temp_storage.aliasable.warp_aggregates.Alias()[WARP]);
}
}
//---------------------------------------------------------------------
// Utility methods for scattering selections
//---------------------------------------------------------------------
/**
* Two-phase scatter, specialized for warp time-slicing
*/
template <bool FIRST_TILE>
__device__ __forceinline__ void ScatterTwoPhase(
OffsetT tile_num_runs_exclusive_in_global,
OffsetT warp_num_runs_aggregate,
OffsetT warp_num_runs_exclusive_in_tile,
OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD],
LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD],
Int2Type<true> is_warp_time_slice)
{
unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x / WARP_THREADS);
int lane_id = LaneId();
// Locally compact items within the warp (first warp)
if (warp_id == 0)
{
WarpExchangePairs(temp_storage.aliasable.scatter_aliasable.exchange_pairs[0]).ScatterToStriped(
lengths_and_offsets, thread_num_runs_exclusive_in_warp);
}
// Locally compact items within the warp (remaining warps)
#pragma unroll
for (int SLICE = 1; SLICE < WARPS; ++SLICE)
{
CTA_SYNC();
if (warp_id == SLICE)
{
WarpExchangePairs(temp_storage.aliasable.scatter_aliasable.exchange_pairs[0]).ScatterToStriped(
lengths_and_offsets, thread_num_runs_exclusive_in_warp);
}
}
// Global scatter
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if ((ITEM * WARP_THREADS) < warp_num_runs_aggregate - lane_id)
{
OffsetT item_offset =
tile_num_runs_exclusive_in_global +
warp_num_runs_exclusive_in_tile +
(ITEM * WARP_THREADS) + lane_id;
// Scatter offset
d_offsets_out[item_offset] = lengths_and_offsets[ITEM].key;
// Scatter length if not the first (global) length
if ((!FIRST_TILE) || (ITEM != 0) || (threadIdx.x > 0))
{
d_lengths_out[item_offset - 1] = lengths_and_offsets[ITEM].value;
}
}
}
}
/**
* Two-phase scatter
*/
template <bool FIRST_TILE>
__device__ __forceinline__ void ScatterTwoPhase(
OffsetT tile_num_runs_exclusive_in_global,
OffsetT warp_num_runs_aggregate,
OffsetT warp_num_runs_exclusive_in_tile,
OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD],
LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD],
Int2Type<false> is_warp_time_slice)
{
unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x / WARP_THREADS);
int lane_id = LaneId();
// Unzip
OffsetT run_offsets[ITEMS_PER_THREAD];
LengthT run_lengths[ITEMS_PER_THREAD];
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
run_offsets[ITEM] = lengths_and_offsets[ITEM].key;
run_lengths[ITEM] = lengths_and_offsets[ITEM].value;
}
WarpExchangeOffsets(temp_storage.aliasable.scatter_aliasable.exchange_offsets[warp_id]).ScatterToStriped(
run_offsets, thread_num_runs_exclusive_in_warp);
WARP_SYNC(0xffffffff);
WarpExchangeLengths(temp_storage.aliasable.scatter_aliasable.exchange_lengths[warp_id]).ScatterToStriped(
run_lengths, thread_num_runs_exclusive_in_warp);
// Global scatter
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if ((ITEM * WARP_THREADS) + lane_id < warp_num_runs_aggregate)
{
OffsetT item_offset =
tile_num_runs_exclusive_in_global +
warp_num_runs_exclusive_in_tile +
(ITEM * WARP_THREADS) + lane_id;
// Scatter offset
d_offsets_out[item_offset] = run_offsets[ITEM];
// Scatter length if not the first (global) length
if ((!FIRST_TILE) || (ITEM != 0) || (threadIdx.x > 0))
{
d_lengths_out[item_offset - 1] = run_lengths[ITEM];
}
}
}
}
/**
* Direct scatter
*/
template <bool FIRST_TILE>
__device__ __forceinline__ void ScatterDirect(
OffsetT tile_num_runs_exclusive_in_global,
OffsetT warp_num_runs_aggregate,
OffsetT warp_num_runs_exclusive_in_tile,
OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD],
LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD])
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
if (thread_num_runs_exclusive_in_warp[ITEM] < warp_num_runs_aggregate)
{
OffsetT item_offset =
tile_num_runs_exclusive_in_global +
warp_num_runs_exclusive_in_tile +
thread_num_runs_exclusive_in_warp[ITEM];
// Scatter offset
d_offsets_out[item_offset] = lengths_and_offsets[ITEM].key;
// Scatter length if not the first (global) length
if (item_offset >= 1)
{
d_lengths_out[item_offset - 1] = lengths_and_offsets[ITEM].value;
}
}
}
}
/**
* Scatter
*/
template <bool FIRST_TILE>
__device__ __forceinline__ void Scatter(
OffsetT tile_num_runs_aggregate,
OffsetT tile_num_runs_exclusive_in_global,
OffsetT warp_num_runs_aggregate,
OffsetT warp_num_runs_exclusive_in_tile,
OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD],
LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD])
{
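        // Scatter directly when two-phase compaction cannot pay off: a single item
        // per thread, or fewer selected runs in the tile than block threads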
if ((ITEMS_PER_THREAD == 1) || (tile_num_runs_aggregate < BLOCK_THREADS))
{
// Direct scatter if the warp has any items
if (warp_num_runs_aggregate)
{
ScatterDirect<FIRST_TILE>(
tile_num_runs_exclusive_in_global,
warp_num_runs_aggregate,
warp_num_runs_exclusive_in_tile,
thread_num_runs_exclusive_in_warp,
lengths_and_offsets);
}
}
else
{
// Scatter two phase
ScatterTwoPhase<FIRST_TILE>(
tile_num_runs_exclusive_in_global,
warp_num_runs_aggregate,
warp_num_runs_exclusive_in_tile,
thread_num_runs_exclusive_in_warp,
lengths_and_offsets,
Int2Type<STORE_WARP_TIME_SLICING>());
}
}
//---------------------------------------------------------------------
// Cooperatively scan a device-wide sequence of tiles with other CTAs
//---------------------------------------------------------------------
/**
* Process a tile of input (dynamic chained scan)
*/
template <
bool LAST_TILE>
__device__ __forceinline__ LengthOffsetPair ConsumeTile(
OffsetT num_items, ///< Total number of global input items
OffsetT num_remaining, ///< Number of global input items remaining (including this tile)
int tile_idx, ///< Tile index
OffsetT tile_offset, ///< Tile offset
ScanTileStateT &tile_status) ///< Global list of tile status
{
if (tile_idx == 0)
{
// First tile
// Load items
T items[ITEMS_PER_THREAD];
if (LAST_TILE)
BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items, num_remaining, T());
else
BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items);
if (SYNC_AFTER_LOAD)
CTA_SYNC();
// Set flags
LengthOffsetPair lengths_and_num_runs[ITEMS_PER_THREAD];
InitializeSelections<true, LAST_TILE>(
tile_offset,
num_remaining,
items,
lengths_and_num_runs);
// Exclusive scan of lengths and runs
LengthOffsetPair tile_aggregate;
LengthOffsetPair warp_aggregate;
LengthOffsetPair warp_exclusive_in_tile;
LengthOffsetPair thread_exclusive_in_warp;
WarpScanAllocations(
tile_aggregate,
warp_aggregate,
warp_exclusive_in_tile,
thread_exclusive_in_warp,
lengths_and_num_runs);
// Update tile status if this is not the last tile
if (!LAST_TILE && (threadIdx.x == 0))
tile_status.SetInclusive(0, tile_aggregate);
// Update thread_exclusive_in_warp to fold in warp run-length
if (thread_exclusive_in_warp.key == 0)
thread_exclusive_in_warp.value += warp_exclusive_in_tile.value;
LengthOffsetPair lengths_and_offsets[ITEMS_PER_THREAD];
OffsetT thread_num_runs_exclusive_in_warp[ITEMS_PER_THREAD];
LengthOffsetPair lengths_and_num_runs2[ITEMS_PER_THREAD];
// Downsweep scan through lengths_and_num_runs
internal::ThreadScanExclusive(lengths_and_num_runs, lengths_and_num_runs2, scan_op, thread_exclusive_in_warp);
// Zip
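            // (items that do not begin a non-trivial run receive the out-of-range
            //  sentinel WARP_THREADS * ITEMS_PER_THREAD so the scatter discards them)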
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
lengths_and_offsets[ITEM].value = lengths_and_num_runs2[ITEM].value;
lengths_and_offsets[ITEM].key = tile_offset + (threadIdx.x * ITEMS_PER_THREAD) + ITEM;
thread_num_runs_exclusive_in_warp[ITEM] = (lengths_and_num_runs[ITEM].key) ?
lengths_and_num_runs2[ITEM].key : // keep
WARP_THREADS * ITEMS_PER_THREAD; // discard
}
OffsetT tile_num_runs_aggregate = tile_aggregate.key;
OffsetT tile_num_runs_exclusive_in_global = 0;
OffsetT warp_num_runs_aggregate = warp_aggregate.key;
OffsetT warp_num_runs_exclusive_in_tile = warp_exclusive_in_tile.key;
// Scatter
Scatter<true>(
tile_num_runs_aggregate,
tile_num_runs_exclusive_in_global,
warp_num_runs_aggregate,
warp_num_runs_exclusive_in_tile,
thread_num_runs_exclusive_in_warp,
lengths_and_offsets);
// Return running total (inclusive of this tile)
return tile_aggregate;
}
else
{
// Not first tile
// Load items
T items[ITEMS_PER_THREAD];
if (LAST_TILE)
BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items, num_remaining, T());
else
BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items);
if (SYNC_AFTER_LOAD)
CTA_SYNC();
// Set flags
LengthOffsetPair lengths_and_num_runs[ITEMS_PER_THREAD];
InitializeSelections<false, LAST_TILE>(
tile_offset,
num_remaining,
items,
lengths_and_num_runs);
// Exclusive scan of lengths and runs
LengthOffsetPair tile_aggregate;
LengthOffsetPair warp_aggregate;
LengthOffsetPair warp_exclusive_in_tile;
LengthOffsetPair thread_exclusive_in_warp;
WarpScanAllocations(
tile_aggregate,
warp_aggregate,
warp_exclusive_in_tile,
thread_exclusive_in_warp,
lengths_and_num_runs);
// First warp computes tile prefix in lane 0
TilePrefixCallbackOpT prefix_op(tile_status, temp_storage.aliasable.prefix, Sum(), tile_idx);
unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x / WARP_THREADS);
if (warp_id == 0)
{
prefix_op(tile_aggregate);
if (threadIdx.x == 0)
temp_storage.tile_exclusive = prefix_op.exclusive_prefix;
}
CTA_SYNC();
LengthOffsetPair tile_exclusive_in_global = temp_storage.tile_exclusive;
// Update thread_exclusive_in_warp to fold in warp and tile run-lengths
LengthOffsetPair thread_exclusive = scan_op(tile_exclusive_in_global, warp_exclusive_in_tile);
if (thread_exclusive_in_warp.key == 0)
thread_exclusive_in_warp.value += thread_exclusive.value;
// Downsweep scan through lengths_and_num_runs
LengthOffsetPair lengths_and_num_runs2[ITEMS_PER_THREAD];
LengthOffsetPair lengths_and_offsets[ITEMS_PER_THREAD];
OffsetT thread_num_runs_exclusive_in_warp[ITEMS_PER_THREAD];
internal::ThreadScanExclusive(lengths_and_num_runs, lengths_and_num_runs2, scan_op, thread_exclusive_in_warp);
// Zip
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
lengths_and_offsets[ITEM].value = lengths_and_num_runs2[ITEM].value;
lengths_and_offsets[ITEM].key = tile_offset + (threadIdx.x * ITEMS_PER_THREAD) + ITEM;
thread_num_runs_exclusive_in_warp[ITEM] = (lengths_and_num_runs[ITEM].key) ?
lengths_and_num_runs2[ITEM].key : // keep
WARP_THREADS * ITEMS_PER_THREAD; // discard
}
OffsetT tile_num_runs_aggregate = tile_aggregate.key;
OffsetT tile_num_runs_exclusive_in_global = tile_exclusive_in_global.key;
OffsetT warp_num_runs_aggregate = warp_aggregate.key;
OffsetT warp_num_runs_exclusive_in_tile = warp_exclusive_in_tile.key;
// Scatter
Scatter<false>(
tile_num_runs_aggregate,
tile_num_runs_exclusive_in_global,
warp_num_runs_aggregate,
warp_num_runs_exclusive_in_tile,
thread_num_runs_exclusive_in_warp,
lengths_and_offsets);
// Return running total (inclusive of this tile)
return prefix_op.inclusive_prefix;
}
}
/**
* Scan tiles of items as part of a dynamic chained scan
*/
template <typename NumRunsIteratorT> ///< Output iterator type for recording number of items selected
__device__ __forceinline__ void ConsumeRange(
int num_tiles, ///< Total number of input tiles
ScanTileStateT& tile_status, ///< Global list of tile status
NumRunsIteratorT d_num_runs_out) ///< Output pointer for total number of runs identified
{
// Blocks are launched in increasing order, so just assign one tile per block
int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index
OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile
OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile)
if (tile_idx < num_tiles - 1)
{
// Not the last tile (full)
ConsumeTile<false>(num_items, num_remaining, tile_idx, tile_offset, tile_status);
}
else if (num_remaining > 0)
{
// The last tile (possibly partially-full)
LengthOffsetPair running_total = ConsumeTile<true>(num_items, num_remaining, tile_idx, tile_offset, tile_status);
if (threadIdx.x == 0)
{
// Output the total number of items selected
*d_num_runs_out = running_total.key;
// The inclusive prefix contains accumulated length reduction for the last run
if (running_total.key > 0)
d_lengths_out[running_total.key - 1] = running_total.value;
}
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_radix_sort_upsweep.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
 * AgentRadixSortUpsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort upsweep.
*/
#pragma once
#include "../thread/thread_reduce.cuh"
#include "../thread/thread_load.cuh"
#include "../warp/warp_reduce.cuh"
#include "../block/block_load.cuh"
#include "../util_type.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Parameterizable tuning policy type for AgentRadixSortUpsweep
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading keys
int _RADIX_BITS> ///< The number of radix bits, i.e., log2(bins)
struct AgentRadixSortUpsweepPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
RADIX_BITS = _RADIX_BITS, ///< The number of radix bits, i.e., log2(bins)
};
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading keys
};
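// Illustrative instantiation (hypothetical values; a sketch, not a tuned policy):
//
//   typedef AgentRadixSortUpsweepPolicy<128, 16, LOAD_DEFAULT, 5> ExampleUpsweepPolicy;
//   // 128 threads/block, 16 keys/thread, default caching, 5-bit digits (32 bins)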
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
 * \brief AgentRadixSortUpsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort upsweep.
*/
template <
typename AgentRadixSortUpsweepPolicy, ///< Parameterized AgentRadixSortUpsweepPolicy tuning policy type
typename KeyT, ///< KeyT type
typename OffsetT> ///< Signed integer type for global offsets
struct AgentRadixSortUpsweep
{
//---------------------------------------------------------------------
// Type definitions and constants
//---------------------------------------------------------------------
typedef typename Traits<KeyT>::UnsignedBits UnsignedBits;
// Integer type for digit counters (to be packed into words of PackedCounters)
typedef unsigned char DigitCounter;
// Integer type for packing DigitCounters into columns of shared memory banks
typedef unsigned int PackedCounter;
static const CacheLoadModifier LOAD_MODIFIER = AgentRadixSortUpsweepPolicy::LOAD_MODIFIER;
enum
{
RADIX_BITS = AgentRadixSortUpsweepPolicy::RADIX_BITS,
BLOCK_THREADS = AgentRadixSortUpsweepPolicy::BLOCK_THREADS,
KEYS_PER_THREAD = AgentRadixSortUpsweepPolicy::ITEMS_PER_THREAD,
RADIX_DIGITS = 1 << RADIX_BITS,
LOG_WARP_THREADS = CUB_PTX_LOG_WARP_THREADS,
WARP_THREADS = 1 << LOG_WARP_THREADS,
WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,
TILE_ITEMS = BLOCK_THREADS * KEYS_PER_THREAD,
BYTES_PER_COUNTER = sizeof(DigitCounter),
LOG_BYTES_PER_COUNTER = Log2<BYTES_PER_COUNTER>::VALUE,
PACKING_RATIO = sizeof(PackedCounter) / sizeof(DigitCounter),
LOG_PACKING_RATIO = Log2<PACKING_RATIO>::VALUE,
LOG_COUNTER_LANES = CUB_MAX(0, RADIX_BITS - LOG_PACKING_RATIO),
COUNTER_LANES = 1 << LOG_COUNTER_LANES,
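        // (e.g., with RADIX_BITS = 5 and PACKING_RATIO = 4: LOG_COUNTER_LANES = 3,
        // so 8 counter lanes of 4 sub-counters each cover all 32 radix digits)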
// To prevent counter overflow, we must periodically unpack and aggregate the
// digit counters back into registers. Each counter lane is assigned to a
// warp for aggregation.
LANES_PER_WARP = CUB_MAX(1, (COUNTER_LANES + WARPS - 1) / WARPS),
// Unroll tiles in batches without risk of counter overflow
UNROLL_COUNT = CUB_MIN(64, 255 / KEYS_PER_THREAD),
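        // (8-bit DigitCounters overflow after 255 increments, so at most
        // 255 / KEYS_PER_THREAD tiles may be bucketed between aggregations)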
UNROLLED_ELEMENTS = UNROLL_COUNT * TILE_ITEMS,
};
    // Input iterator wrapper type (for applying cache modifier)
typedef CacheModifiedInputIterator<LOAD_MODIFIER, UnsignedBits, OffsetT> KeysItr;
/**
* Shared memory storage layout
*/
union __align__(16) _TempStorage
{
DigitCounter thread_counters[COUNTER_LANES][BLOCK_THREADS][PACKING_RATIO];
PackedCounter packed_thread_counters[COUNTER_LANES][BLOCK_THREADS];
OffsetT block_counters[WARP_THREADS][RADIX_DIGITS];
};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Thread fields (aggregate state bundle)
//---------------------------------------------------------------------
// Shared storage for this CTA
_TempStorage &temp_storage;
// Thread-local counters for periodically aggregating composite-counter lanes
OffsetT local_counts[LANES_PER_WARP][PACKING_RATIO];
// Input and output device pointers
KeysItr d_keys_in;
// The least-significant bit position of the current digit to extract
int current_bit;
// Number of bits in current digit
int num_bits;
//---------------------------------------------------------------------
// Helper structure for templated iteration
//---------------------------------------------------------------------
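    // Iterate<COUNT, MAX> unrolls BucketKeys over all KEYS_PER_THREAD keys at
    // compile time; the Iterate<MAX, MAX> specialization terminates the recursion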
// Iterate
template <int COUNT, int MAX>
struct Iterate
{
// BucketKeys
static __device__ __forceinline__ void BucketKeys(
AgentRadixSortUpsweep &cta,
UnsignedBits keys[KEYS_PER_THREAD])
{
cta.Bucket(keys[COUNT]);
// Next
Iterate<COUNT + 1, MAX>::BucketKeys(cta, keys);
}
};
// Terminate
template <int MAX>
struct Iterate<MAX, MAX>
{
// BucketKeys
static __device__ __forceinline__ void BucketKeys(AgentRadixSortUpsweep &/*cta*/, UnsignedBits /*keys*/[KEYS_PER_THREAD]) {}
};
//---------------------------------------------------------------------
// Utility methods
//---------------------------------------------------------------------
/**
* Decode a key and increment corresponding smem digit counter
*/
__device__ __forceinline__ void Bucket(UnsignedBits key)
{
// Perform transform op
UnsignedBits converted_key = Traits<KeyT>::TwiddleIn(key);
// Extract current digit bits
UnsignedBits digit = BFE(converted_key, current_bit, num_bits);
// Get sub-counter offset
UnsignedBits sub_counter = digit & (PACKING_RATIO - 1);
// Get row offset
UnsignedBits row_offset = digit >> LOG_PACKING_RATIO;
// Increment counter
temp_storage.thread_counters[row_offset][threadIdx.x][sub_counter]++;
}
/**
* Reset composite counters
*/
__device__ __forceinline__ void ResetDigitCounters()
{
#pragma unroll
for (int LANE = 0; LANE < COUNTER_LANES; LANE++)
{
temp_storage.packed_thread_counters[LANE][threadIdx.x] = 0;
}
}
/**
* Reset the unpacked counters in each thread
*/
__device__ __forceinline__ void ResetUnpackedCounters()
{
#pragma unroll
for (int LANE = 0; LANE < LANES_PER_WARP; LANE++)
{
#pragma unroll
for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++)
{
local_counts[LANE][UNPACKED_COUNTER] = 0;
}
}
}
/**
* Extracts and aggregates the digit counters for each counter lane
* owned by this warp
*/
__device__ __forceinline__ void UnpackDigitCounts()
{
unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS;
unsigned int warp_tid = LaneId();
#pragma unroll
for (int LANE = 0; LANE < LANES_PER_WARP; LANE++)
{
const int counter_lane = (LANE * WARPS) + warp_id;
if (counter_lane < COUNTER_LANES)
{
#pragma unroll
for (int PACKED_COUNTER = 0; PACKED_COUNTER < BLOCK_THREADS; PACKED_COUNTER += WARP_THREADS)
{
#pragma unroll
for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++)
{
OffsetT counter = temp_storage.thread_counters[counter_lane][warp_tid + PACKED_COUNTER][UNPACKED_COUNTER];
local_counts[LANE][UNPACKED_COUNTER] += counter;
}
}
}
}
}
/**
* Processes a single, full tile
*/
__device__ __forceinline__ void ProcessFullTile(OffsetT block_offset)
{
// Tile of keys
UnsignedBits keys[KEYS_PER_THREAD];
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys_in + block_offset, keys);
// Prevent hoisting
CTA_SYNC();
// Bucket tile of keys
Iterate<0, KEYS_PER_THREAD>::BucketKeys(*this, keys);
}
/**
* Processes a single load (may have some threads masked off)
*/
__device__ __forceinline__ void ProcessPartialTile(
OffsetT block_offset,
const OffsetT &block_end)
{
// Process partial tile if necessary using single loads
block_offset += threadIdx.x;
while (block_offset < block_end)
{
// Load and bucket key
UnsignedBits key = d_keys_in[block_offset];
Bucket(key);
block_offset += BLOCK_THREADS;
}
}
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ AgentRadixSortUpsweep(
TempStorage &temp_storage,
const KeyT *d_keys_in,
int current_bit,
int num_bits)
:
temp_storage(temp_storage.Alias()),
d_keys_in(reinterpret_cast<const UnsignedBits*>(d_keys_in)),
current_bit(current_bit),
num_bits(num_bits)
{}
/**
* Compute radix digit histograms from a segment of input tiles.
*/
__device__ __forceinline__ void ProcessRegion(
OffsetT block_offset,
const OffsetT &block_end)
{
// Reset digit counters in smem and unpacked counters in registers
ResetDigitCounters();
ResetUnpackedCounters();
// Unroll batches of full tiles
while (block_offset + UNROLLED_ELEMENTS <= block_end)
{
for (int i = 0; i < UNROLL_COUNT; ++i)
{
ProcessFullTile(block_offset);
block_offset += TILE_ITEMS;
}
CTA_SYNC();
// Aggregate back into local_count registers to prevent overflow
UnpackDigitCounts();
CTA_SYNC();
// Reset composite counters in lanes
ResetDigitCounters();
}
// Unroll single full tiles
while (block_offset + TILE_ITEMS <= block_end)
{
ProcessFullTile(block_offset);
block_offset += TILE_ITEMS;
}
// Process partial tile if necessary
ProcessPartialTile(
block_offset,
block_end);
CTA_SYNC();
// Aggregate back into local_count registers
UnpackDigitCounts();
}
/**
* Extract counts (saving them to the external array)
*/
template <bool IS_DESCENDING>
__device__ __forceinline__ void ExtractCounts(
OffsetT *counters,
int bin_stride = 1,
int bin_offset = 0)
{
unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS;
unsigned int warp_tid = LaneId();
// Place unpacked digit counters in shared memory
#pragma unroll
for (int LANE = 0; LANE < LANES_PER_WARP; LANE++)
{
int counter_lane = (LANE * WARPS) + warp_id;
if (counter_lane < COUNTER_LANES)
{
int digit_row = counter_lane << LOG_PACKING_RATIO;
#pragma unroll
for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++)
{
int bin_idx = digit_row + UNPACKED_COUNTER;
temp_storage.block_counters[warp_tid][bin_idx] =
local_counts[LANE][UNPACKED_COUNTER];
}
}
}
CTA_SYNC();
// Rake-reduce bin_count reductions
// Whole blocks
#pragma unroll
for (int BIN_BASE = RADIX_DIGITS % BLOCK_THREADS;
(BIN_BASE + BLOCK_THREADS) <= RADIX_DIGITS;
BIN_BASE += BLOCK_THREADS)
{
int bin_idx = BIN_BASE + threadIdx.x;
OffsetT bin_count = 0;
#pragma unroll
for (int i = 0; i < WARP_THREADS; ++i)
bin_count += temp_storage.block_counters[i][bin_idx];
if (IS_DESCENDING)
bin_idx = RADIX_DIGITS - bin_idx - 1;
counters[(bin_stride * bin_idx) + bin_offset] = bin_count;
}
// Remainder
if ((RADIX_DIGITS % BLOCK_THREADS != 0) && (threadIdx.x < RADIX_DIGITS))
{
int bin_idx = threadIdx.x;
OffsetT bin_count = 0;
#pragma unroll
for (int i = 0; i < WARP_THREADS; ++i)
bin_count += temp_storage.block_counters[i][bin_idx];
if (IS_DESCENDING)
bin_idx = RADIX_DIGITS - bin_idx - 1;
counters[(bin_stride * bin_idx) + bin_offset] = bin_count;
}
}
/**
* Extract counts
*/
template <int BINS_TRACKED_PER_THREAD>
__device__ __forceinline__ void ExtractCounts(
        OffsetT (&bin_count)[BINS_TRACKED_PER_THREAD])     ///< [out] The digit count for each of the bins [(threadIdx.x * BINS_TRACKED_PER_THREAD) ... (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1]
{
unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS;
unsigned int warp_tid = LaneId();
// Place unpacked digit counters in shared memory
#pragma unroll
for (int LANE = 0; LANE < LANES_PER_WARP; LANE++)
{
int counter_lane = (LANE * WARPS) + warp_id;
if (counter_lane < COUNTER_LANES)
{
int digit_row = counter_lane << LOG_PACKING_RATIO;
#pragma unroll
for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++)
{
int bin_idx = digit_row + UNPACKED_COUNTER;
temp_storage.block_counters[warp_tid][bin_idx] =
local_counts[LANE][UNPACKED_COUNTER];
}
}
}
CTA_SYNC();
// Rake-reduce bin_count reductions
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
bin_count[track] = 0;
#pragma unroll
for (int i = 0; i < WARP_THREADS; ++i)
bin_count[track] += temp_storage.block_counters[i][bin_idx];
}
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_reduce_by_key.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::AgentReduceByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key.
*/
#pragma once
#include <iterator>
#include "single_pass_scan_operators.cuh"
#include "../block/block_load.cuh"
#include "../block/block_store.cuh"
#include "../block/block_scan.cuh"
#include "../block/block_discontinuity.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../iterator/constant_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Parameterizable tuning policy type for AgentReduceByKey
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use
CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements
BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use
struct AgentReduceByKeyPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
};
static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements
static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use
};
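// Illustrative instantiation (hypothetical values; a sketch, not a tuned policy):
//
//   typedef AgentReduceByKeyPolicy<
//       128, 6,
//       BLOCK_LOAD_DIRECT, LOAD_LDG,
//       BLOCK_SCAN_WARP_SCANS>
//     ExampleReduceByKeyPolicy;    // 128 threads/block, 6 items/thread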
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief AgentReduceByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key
*/
template <
typename AgentReduceByKeyPolicyT, ///< Parameterized AgentReduceByKeyPolicy tuning policy type
typename KeysInputIteratorT, ///< Random-access input iterator type for keys
typename UniqueOutputIteratorT, ///< Random-access output iterator type for keys
typename ValuesInputIteratorT, ///< Random-access input iterator type for values
typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values
typename NumRunsOutputIteratorT, ///< Output iterator type for recording number of items selected
typename EqualityOpT, ///< KeyT equality operator type
typename ReductionOpT, ///< ValueT reduction operator type
typename OffsetT> ///< Signed integer type for global offsets
struct AgentReduceByKey
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
// The input keys type
typedef typename std::iterator_traits<KeysInputIteratorT>::value_type KeyInputT;
// The output keys type
typedef typename If<(Equals<typename std::iterator_traits<UniqueOutputIteratorT>::value_type, void>::VALUE), // KeyOutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<KeysInputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<UniqueOutputIteratorT>::value_type>::Type KeyOutputT; // ... else the output iterator's value type
// The input values type
typedef typename std::iterator_traits<ValuesInputIteratorT>::value_type ValueInputT;
// The output values type
typedef typename If<(Equals<typename std::iterator_traits<AggregatesOutputIteratorT>::value_type, void>::VALUE), // ValueOutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<ValuesInputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<AggregatesOutputIteratorT>::value_type>::Type ValueOutputT; // ... else the output iterator's value type
// Tuple type for scanning (pairs accumulated segment-value with segment-index)
typedef KeyValuePair<OffsetT, ValueOutputT> OffsetValuePairT;
// Tuple type for pairing keys and values
typedef KeyValuePair<KeyOutputT, ValueOutputT> KeyValuePairT;
// Tile status descriptor interface type
typedef ReduceByKeyScanTileState<ValueOutputT, OffsetT> ScanTileStateT;
    // Guarded inequality functor (also flags the first out-of-bounds item as a segment head so the final in-bounds segment is terminated)
template <typename _EqualityOpT>
struct GuardedInequalityWrapper
{
_EqualityOpT op; ///< Wrapped equality operator
int num_remaining; ///< Items remaining
/// Constructor
__host__ __device__ __forceinline__
GuardedInequalityWrapper(_EqualityOpT op, int num_remaining) : op(op), num_remaining(num_remaining) {}
/// Boolean inequality operator, returns <tt>(a != b)</tt>
template <typename T>
__host__ __device__ __forceinline__ bool operator()(const T &a, const T &b, int idx) const
{
if (idx < num_remaining)
return !op(a, b); // In bounds
// Return true if first out-of-bounds item, false otherwise
return (idx == num_remaining);
}
};
// Constants
enum
{
BLOCK_THREADS = AgentReduceByKeyPolicyT::BLOCK_THREADS,
ITEMS_PER_THREAD = AgentReduceByKeyPolicyT::ITEMS_PER_THREAD,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
TWO_PHASE_SCATTER = (ITEMS_PER_THREAD > 1),
// Whether or not the scan operation has a zero-valued identity value (true if we're performing addition on a primitive type)
HAS_IDENTITY_ZERO = (Equals<ReductionOpT, cub::Sum>::VALUE) && (Traits<ValueOutputT>::PRIMITIVE),
};
// Cache-modified Input iterator wrapper type (for applying cache modifier) for keys
typedef typename If<IsPointer<KeysInputIteratorT>::VALUE,
        CacheModifiedInputIterator<AgentReduceByKeyPolicyT::LOAD_MODIFIER, KeyInputT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator
KeysInputIteratorT>::Type // Directly use the supplied input iterator type
WrappedKeysInputIteratorT;
// Cache-modified Input iterator wrapper type (for applying cache modifier) for values
typedef typename If<IsPointer<ValuesInputIteratorT>::VALUE,
        CacheModifiedInputIterator<AgentReduceByKeyPolicyT::LOAD_MODIFIER, ValueInputT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator
ValuesInputIteratorT>::Type // Directly use the supplied input iterator type
WrappedValuesInputIteratorT;
// Cache-modified Input iterator wrapper type (for applying cache modifier) for fixup values
typedef typename If<IsPointer<AggregatesOutputIteratorT>::VALUE,
        CacheModifiedInputIterator<AgentReduceByKeyPolicyT::LOAD_MODIFIER, ValueInputT, OffsetT>, // Wrap the native pointer with CacheModifiedInputIterator
AggregatesOutputIteratorT>::Type // Directly use the supplied input iterator type
WrappedFixupInputIteratorT;
// Reduce-value-by-segment scan operator
typedef ReduceBySegmentOp<ReductionOpT> ReduceBySegmentOpT;
// Parameterized BlockLoad type for keys
typedef BlockLoad<
KeyOutputT,
BLOCK_THREADS,
ITEMS_PER_THREAD,
AgentReduceByKeyPolicyT::LOAD_ALGORITHM>
BlockLoadKeysT;
// Parameterized BlockLoad type for values
typedef BlockLoad<
ValueOutputT,
BLOCK_THREADS,
ITEMS_PER_THREAD,
AgentReduceByKeyPolicyT::LOAD_ALGORITHM>
BlockLoadValuesT;
// Parameterized BlockDiscontinuity type for keys
typedef BlockDiscontinuity<
KeyOutputT,
BLOCK_THREADS>
BlockDiscontinuityKeys;
// Parameterized BlockScan type
typedef BlockScan<
OffsetValuePairT,
BLOCK_THREADS,
AgentReduceByKeyPolicyT::SCAN_ALGORITHM>
BlockScanT;
// Callback type for obtaining tile prefix during block scan
typedef TilePrefixCallbackOp<
OffsetValuePairT,
ReduceBySegmentOpT,
ScanTileStateT>
TilePrefixCallbackOpT;
// Key and value exchange types
typedef KeyOutputT KeyExchangeT[TILE_ITEMS + 1];
typedef ValueOutputT ValueExchangeT[TILE_ITEMS + 1];
// Shared memory type for this thread block
union _TempStorage
{
struct
{
typename BlockScanT::TempStorage scan; // Smem needed for tile scanning
typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback
typename BlockDiscontinuityKeys::TempStorage discontinuity; // Smem needed for discontinuity detection
};
// Smem needed for loading keys
typename BlockLoadKeysT::TempStorage load_keys;
// Smem needed for loading values
typename BlockLoadValuesT::TempStorage load_values;
        // Smem needed for compacting key-value pairs (allows non-POD items in this union)
Uninitialized<KeyValuePairT[TILE_ITEMS + 1]> raw_exchange;
};
// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
_TempStorage& temp_storage; ///< Reference to temp_storage
WrappedKeysInputIteratorT d_keys_in; ///< Input keys
UniqueOutputIteratorT d_unique_out; ///< Unique output keys
WrappedValuesInputIteratorT d_values_in; ///< Input values
AggregatesOutputIteratorT d_aggregates_out; ///< Output value aggregates
NumRunsOutputIteratorT d_num_runs_out; ///< Output pointer for total number of segments identified
EqualityOpT equality_op; ///< KeyT equality operator
ReductionOpT reduction_op; ///< Reduction operator
ReduceBySegmentOpT scan_op; ///< Reduce-by-segment scan operator
//---------------------------------------------------------------------
// Constructor
//---------------------------------------------------------------------
// Constructor
__device__ __forceinline__
AgentReduceByKey(
TempStorage& temp_storage, ///< Reference to temp_storage
KeysInputIteratorT d_keys_in, ///< Input keys
UniqueOutputIteratorT d_unique_out, ///< Unique output keys
ValuesInputIteratorT d_values_in, ///< Input values
AggregatesOutputIteratorT d_aggregates_out, ///< Output value aggregates
NumRunsOutputIteratorT d_num_runs_out, ///< Output pointer for total number of segments identified
EqualityOpT equality_op, ///< KeyT equality operator
ReductionOpT reduction_op) ///< ValueT reduction operator
:
temp_storage(temp_storage.Alias()),
d_keys_in(d_keys_in),
d_unique_out(d_unique_out),
d_values_in(d_values_in),
d_aggregates_out(d_aggregates_out),
d_num_runs_out(d_num_runs_out),
equality_op(equality_op),
reduction_op(reduction_op),
scan_op(reduction_op)
{}
//---------------------------------------------------------------------
// Scatter utility methods
//---------------------------------------------------------------------
/**
* Directly scatter flagged items to output offsets
*/
__device__ __forceinline__ void ScatterDirect(
KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD],
OffsetT (&segment_flags)[ITEMS_PER_THREAD],
OffsetT (&segment_indices)[ITEMS_PER_THREAD])
{
// Scatter flagged keys and values
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
if (segment_flags[ITEM])
{
d_unique_out[segment_indices[ITEM]] = scatter_items[ITEM].key;
d_aggregates_out[segment_indices[ITEM]] = scatter_items[ITEM].value;
}
}
}
/**
* 2-phase scatter flagged items to output offsets
*
* The exclusive scan causes each head flag to be paired with the previous
* value aggregate: the scatter offsets must be decremented for value aggregates
*/
__device__ __forceinline__ void ScatterTwoPhase(
KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD],
OffsetT (&segment_flags)[ITEMS_PER_THREAD],
OffsetT (&segment_indices)[ITEMS_PER_THREAD],
OffsetT num_tile_segments,
OffsetT num_tile_segments_prefix)
{
CTA_SYNC();
// Compact and scatter pairs
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
if (segment_flags[ITEM])
{
temp_storage.raw_exchange.Alias()[segment_indices[ITEM] - num_tile_segments_prefix] = scatter_items[ITEM];
}
}
CTA_SYNC();
for (int item = threadIdx.x; item < num_tile_segments; item += BLOCK_THREADS)
{
KeyValuePairT pair = temp_storage.raw_exchange.Alias()[item];
d_unique_out[num_tile_segments_prefix + item] = pair.key;
d_aggregates_out[num_tile_segments_prefix + item] = pair.value;
}
}
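// Note: the two-phase path compacts this tile's flagged pairs into the
// raw_exchange buffer at tile-local ranks (global segment index minus
// num_tile_segments_prefix), then re-reads them in a block-strided
// sweep so that adjacent threads write adjacent global output slots.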
/**
* Scatter flagged items
*/
__device__ __forceinline__ void Scatter(
KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD],
OffsetT (&segment_flags)[ITEMS_PER_THREAD],
OffsetT (&segment_indices)[ITEMS_PER_THREAD],
OffsetT num_tile_segments,
OffsetT num_tile_segments_prefix)
{
// Use two-phase scatter when enabled and the average number of selected items per thread exceeds one; otherwise scatter directly
if (TWO_PHASE_SCATTER && (num_tile_segments > BLOCK_THREADS))
{
ScatterTwoPhase(
scatter_items,
segment_flags,
segment_indices,
num_tile_segments,
num_tile_segments_prefix);
}
else
{
ScatterDirect(
scatter_items,
segment_flags,
segment_indices);
}
}
//---------------------------------------------------------------------
// Cooperatively scan a device-wide sequence of tiles with other CTAs
//---------------------------------------------------------------------
/**
* Process a tile of input (dynamic chained scan)
*/
template <bool IS_LAST_TILE> ///< Whether the current tile is the last tile
__device__ __forceinline__ void ConsumeTile(
OffsetT num_remaining, ///< Number of global input items remaining (including this tile)
int tile_idx, ///< Tile index
OffsetT tile_offset, ///< Tile offset
ScanTileStateT& tile_state) ///< Global tile state descriptor
{
KeyOutputT keys[ITEMS_PER_THREAD]; // Tile keys
KeyOutputT prev_keys[ITEMS_PER_THREAD]; // Tile keys shuffled up
ValueOutputT values[ITEMS_PER_THREAD]; // Tile values
OffsetT head_flags[ITEMS_PER_THREAD]; // Segment head flags
OffsetT segment_indices[ITEMS_PER_THREAD]; // Segment indices
OffsetValuePairT scan_items[ITEMS_PER_THREAD]; // Zipped values and segment flags|indices
KeyValuePairT scatter_items[ITEMS_PER_THREAD]; // Zipped key value pairs for scattering
// Load keys
if (IS_LAST_TILE)
BlockLoadKeysT(temp_storage.load_keys).Load(d_keys_in + tile_offset, keys, num_remaining);
else
BlockLoadKeysT(temp_storage.load_keys).Load(d_keys_in + tile_offset, keys);
// Load tile predecessor key in first thread
KeyOutputT tile_predecessor;
if (threadIdx.x == 0)
{
tile_predecessor = (tile_idx == 0) ?
keys[0] : // First tile gets repeat of first item (thus first item will not be flagged as a head)
d_keys_in[tile_offset - 1]; // Subsequent tiles get last key from previous tile
}
CTA_SYNC();
// Load values
if (IS_LAST_TILE)
BlockLoadValuesT(temp_storage.load_values).Load(d_values_in + tile_offset, values, num_remaining);
else
BlockLoadValuesT(temp_storage.load_values).Load(d_values_in + tile_offset, values);
CTA_SYNC();
// Initialize head-flags and shuffle up the previous keys
if (IS_LAST_TILE)
{
// Use custom flag operator to additionally flag the first out-of-bounds item
GuardedInequalityWrapper<EqualityOpT> flag_op(equality_op, num_remaining);
BlockDiscontinuityKeys(temp_storage.discontinuity).FlagHeads(
head_flags, keys, prev_keys, flag_op, tile_predecessor);
}
else
{
InequalityWrapper<EqualityOpT> flag_op(equality_op);
BlockDiscontinuityKeys(temp_storage.discontinuity).FlagHeads(
head_flags, keys, prev_keys, flag_op, tile_predecessor);
}
// Zip values and head flags
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
scan_items[ITEM].value = values[ITEM];
scan_items[ITEM].key = head_flags[ITEM];
}
// Perform exclusive tile scan
OffsetValuePairT block_aggregate; // Inclusive block-wide scan aggregate
OffsetT num_segments_prefix; // Number of segments prior to this tile
ValueOutputT total_aggregate; // The tile prefix folded with block_aggregate
if (tile_idx == 0)
{
// Scan first tile
BlockScanT(temp_storage.scan).ExclusiveScan(scan_items, scan_items, scan_op, block_aggregate);
num_segments_prefix = 0;
total_aggregate = block_aggregate.value;
// Update tile status if there are successor tiles
if ((!IS_LAST_TILE) && (threadIdx.x == 0))
tile_state.SetInclusive(0, block_aggregate);
}
else
{
// Scan non-first tile
TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, scan_op, tile_idx);
BlockScanT(temp_storage.scan).ExclusiveScan(scan_items, scan_items, scan_op, prefix_op);
block_aggregate = prefix_op.GetBlockAggregate();
num_segments_prefix = prefix_op.GetExclusivePrefix().key;
total_aggregate = reduction_op(
prefix_op.GetExclusivePrefix().value,
block_aggregate.value);
}
// Rezip scatter items and segment indices
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
scatter_items[ITEM].key = prev_keys[ITEM];
scatter_items[ITEM].value = scan_items[ITEM].value;
segment_indices[ITEM] = scan_items[ITEM].key;
}
// At this point, each flagged segment head has:
// - The key for the previous segment
// - The reduced value from the previous segment
// - The segment index for the reduced value
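// Illustrative trace (hypothetical first tile): keys {a,a,b,b}, values
// {1,2,3,4}. The tile predecessor repeats keys[0], so head_flags are
// {0,0,1,0}. The exclusive scan leaves the flagged item (rank 2) with
// segment index 0 and running value 3 (= 1+2), while prev_keys[2] == a,
// so it scatters <a, 3> into output slot 0. The trailing segment
// <b, 3+4> is emitted by the last thread of the last tile below.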
// Scatter flagged keys and values
OffsetT num_tile_segments = block_aggregate.key;
Scatter(scatter_items, head_flags, segment_indices, num_tile_segments, num_segments_prefix);
// Last thread in last tile will output final count (and last pair, if necessary)
if ((IS_LAST_TILE) && (threadIdx.x == BLOCK_THREADS - 1))
{
OffsetT num_segments = num_segments_prefix + num_tile_segments;
// If the last tile is a full tile, the final segment was never flagged: output its key and total aggregate
if (num_remaining == TILE_ITEMS)
{
d_unique_out[num_segments] = keys[ITEMS_PER_THREAD - 1];
d_aggregates_out[num_segments] = total_aggregate;
num_segments++;
}
// Output the total number of items selected
*d_num_runs_out = num_segments;
}
}
/**
* Scan tiles of items as part of a dynamic chained scan
*/
__device__ __forceinline__ void ConsumeRange(
int num_items, ///< Total number of input items
ScanTileStateT& tile_state, ///< Global tile state descriptor
int start_tile) ///< The starting tile for the current grid
{
// Blocks are launched in increasing order, so just assign one tile per block
int tile_idx = start_tile + blockIdx.x; // Current tile index
OffsetT tile_offset = OffsetT(TILE_ITEMS) * tile_idx; // Global offset for the current tile
OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile)
if (num_remaining > TILE_ITEMS)
{
// Not last tile
ConsumeTile<false>(num_remaining, tile_idx, tile_offset, tile_state);
}
else if (num_remaining > 0)
{
// Last tile
ConsumeTile<true>(num_remaining, tile_idx, tile_offset, tile_state);
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
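// Usage sketch (illustrative only): this agent underpins the
// device-level cub::DeviceReduce::ReduceByKey entry point. The
// standard two-call idiom, assuming stock CUB signatures (this
// semiring fork may expose different operators) and hypothetical
// device pointers:
//
// void *d_temp = NULL; size_t temp_bytes = 0;
// cub::DeviceReduce::ReduceByKey(d_temp, temp_bytes,
// d_keys_in, d_unique_out, d_values_in, d_aggregates_out,
// d_num_runs_out, cub::Sum(), num_items); // sizing pass
// cudaMalloc(&d_temp, temp_bytes);
// cub::DeviceReduce::ReduceByKey(d_temp, temp_bytes,
// d_keys_in, d_unique_out, d_values_in, d_aggregates_out,
// d_num_runs_out, cub::Sum(), num_items); // reduction pass
// cudaFree(d_temp);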
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_radix_sort_downsweep.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* AgentRadixSortDownsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort downsweep.
*/
#pragma once
#include <stdint.h>
#include "../thread/thread_load.cuh"
#include "../block/block_load.cuh"
#include "../block/block_store.cuh"
#include "../block/block_radix_rank.cuh"
#include "../block/block_exchange.cuh"
#include "../util_type.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Radix ranking algorithm
*/
enum RadixRankAlgorithm
{
RADIX_RANK_BASIC,
RADIX_RANK_MEMOIZE,
RADIX_RANK_MATCH
};
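// RADIX_RANK_BASIC ranks keys with a straightforward shared-memory
// counting pass; RADIX_RANK_MEMOIZE additionally caches packed digit
// counters in registers, trading register pressure for fewer smem
// reads; RADIX_RANK_MATCH ranks within warps using warp-synchronous
// match operations (hardware-accelerated on Volta-class GPUs and up).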
/**
* Parameterizable tuning policy type for AgentRadixSortDownsweep
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use
CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading keys (and values)
RadixRankAlgorithm _RANK_ALGORITHM, ///< The radix ranking algorithm to use
BlockScanAlgorithm _SCAN_ALGORITHM, ///< The block scan algorithm to use
int _RADIX_BITS> ///< The number of radix bits, i.e., log2(bins)
struct AgentRadixSortDownsweepPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
RADIX_BITS = _RADIX_BITS, ///< The number of radix bits, i.e., log2(bins)
};
static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading keys (and values)
static const RadixRankAlgorithm RANK_ALGORITHM = _RANK_ALGORITHM; ///< The radix ranking algorithm to use
static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use
};
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief AgentRadixSortDownsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort downsweep.
*/
template <
typename AgentRadixSortDownsweepPolicy, ///< Parameterized AgentRadixSortDownsweepPolicy tuning policy type
bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low
typename KeyT, ///< KeyT type
typename ValueT, ///< ValueT type
typename OffsetT> ///< Signed integer type for global offsets
struct AgentRadixSortDownsweep
{
//---------------------------------------------------------------------
// Type definitions and constants
//---------------------------------------------------------------------
// Appropriate unsigned-bits representation of KeyT
typedef typename Traits<KeyT>::UnsignedBits UnsignedBits;
static const UnsignedBits LOWEST_KEY = Traits<KeyT>::LOWEST_KEY;
static const UnsignedBits MAX_KEY = Traits<KeyT>::MAX_KEY;
static const BlockLoadAlgorithm LOAD_ALGORITHM = AgentRadixSortDownsweepPolicy::LOAD_ALGORITHM;
static const CacheLoadModifier LOAD_MODIFIER = AgentRadixSortDownsweepPolicy::LOAD_MODIFIER;
static const RadixRankAlgorithm RANK_ALGORITHM = AgentRadixSortDownsweepPolicy::RANK_ALGORITHM;
static const BlockScanAlgorithm SCAN_ALGORITHM = AgentRadixSortDownsweepPolicy::SCAN_ALGORITHM;
enum
{
BLOCK_THREADS = AgentRadixSortDownsweepPolicy::BLOCK_THREADS,
ITEMS_PER_THREAD = AgentRadixSortDownsweepPolicy::ITEMS_PER_THREAD,
RADIX_BITS = AgentRadixSortDownsweepPolicy::RADIX_BITS,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
RADIX_DIGITS = 1 << RADIX_BITS,
KEYS_ONLY = Equals<ValueT, NullType>::VALUE,
};
// Input iterator wrapper types (for applying cache modifier)
typedef CacheModifiedInputIterator<LOAD_MODIFIER, UnsignedBits, OffsetT> KeysItr;
typedef CacheModifiedInputIterator<LOAD_MODIFIER, ValueT, OffsetT> ValuesItr;
// Radix ranking type to use
typedef typename If<(RANK_ALGORITHM == RADIX_RANK_BASIC),
BlockRadixRank<BLOCK_THREADS, RADIX_BITS, IS_DESCENDING, false, SCAN_ALGORITHM>,
typename If<(RANK_ALGORITHM == RADIX_RANK_MEMOIZE),
BlockRadixRank<BLOCK_THREADS, RADIX_BITS, IS_DESCENDING, true, SCAN_ALGORITHM>,
BlockRadixRankMatch<BLOCK_THREADS, RADIX_BITS, IS_DESCENDING, SCAN_ALGORITHM>
>::Type
>::Type BlockRadixRankT;
enum
{
/// Number of bin-starting offsets tracked per thread
BINS_TRACKED_PER_THREAD = BlockRadixRankT::BINS_TRACKED_PER_THREAD
};
// BlockLoad type (keys)
typedef BlockLoad<
UnsignedBits,
BLOCK_THREADS,
ITEMS_PER_THREAD,
LOAD_ALGORITHM> BlockLoadKeysT;
// BlockLoad type (values)
typedef BlockLoad<
ValueT,
BLOCK_THREADS,
ITEMS_PER_THREAD,
LOAD_ALGORITHM> BlockLoadValuesT;
// Value exchange array type
typedef ValueT ValueExchangeT[TILE_ITEMS];
/**
* Shared memory storage layout
*/
union __align__(16) _TempStorage
{
typename BlockLoadKeysT::TempStorage load_keys;
typename BlockLoadValuesT::TempStorage load_values;
typename BlockRadixRankT::TempStorage radix_rank;
struct
{
UnsignedBits exchange_keys[TILE_ITEMS];
OffsetT relative_bin_offsets[RADIX_DIGITS];
};
Uninitialized<ValueExchangeT> exchange_values;
OffsetT exclusive_digit_prefix[RADIX_DIGITS];
};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Thread fields
//---------------------------------------------------------------------
// Shared storage for this CTA
_TempStorage &temp_storage;
// Input and output device pointers
KeysItr d_keys_in;
ValuesItr d_values_in;
UnsignedBits *d_keys_out;
ValueT *d_values_out;
// The global scatter base offset for each digit (valid in the first RADIX_DIGITS threads)
OffsetT bin_offset[BINS_TRACKED_PER_THREAD];
// The least-significant bit position of the current digit to extract
int current_bit;
// Number of bits in current digit
int num_bits;
// Whether to short-circuit
int short_circuit;
//---------------------------------------------------------------------
// Utility methods
//---------------------------------------------------------------------
/**
* Scatter ranked keys through shared memory, then to device-accessible memory
*/
template <bool FULL_TILE>
__device__ __forceinline__ void ScatterKeys(
UnsignedBits (&twiddled_keys)[ITEMS_PER_THREAD],
OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD],
int (&ranks)[ITEMS_PER_THREAD],
OffsetT valid_items)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
temp_storage.exchange_keys[ranks[ITEM]] = twiddled_keys[ITEM];
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
UnsignedBits key = temp_storage.exchange_keys[threadIdx.x + (ITEM * BLOCK_THREADS)];
UnsignedBits digit = BFE(key, current_bit, num_bits);
relative_bin_offsets[ITEM] = temp_storage.relative_bin_offsets[digit];
// Un-twiddle
key = Traits<KeyT>::TwiddleOut(key);
if (FULL_TILE ||
(static_cast<OffsetT>(threadIdx.x + (ITEM * BLOCK_THREADS)) < valid_items))
{
d_keys_out[relative_bin_offsets[ITEM] + threadIdx.x + (ITEM * BLOCK_THREADS)] = key;
}
}
}
/**
* Scatter ranked values through shared memory, then to device-accessible memory
*/
template <bool FULL_TILE>
__device__ __forceinline__ void ScatterValues(
ValueT (&values)[ITEMS_PER_THREAD],
OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD],
int (&ranks)[ITEMS_PER_THREAD],
OffsetT valid_items)
{
CTA_SYNC();
ValueExchangeT &exchange_values = temp_storage.exchange_values.Alias();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
exchange_values[ranks[ITEM]] = values[ITEM];
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
ValueT value = exchange_values[threadIdx.x + (ITEM * BLOCK_THREADS)];
if (FULL_TILE ||
(static_cast<OffsetT>(threadIdx.x + (ITEM * BLOCK_THREADS)) < valid_items))
{
d_values_out[relative_bin_offsets[ITEM] + threadIdx.x + (ITEM * BLOCK_THREADS)] = value;
}
}
}
/**
* Load a tile of keys (specialized for full tile, any ranking algorithm)
*/
template <int _RANK_ALGORITHM>
__device__ __forceinline__ void LoadKeys(
UnsignedBits (&keys)[ITEMS_PER_THREAD],
OffsetT block_offset,
OffsetT valid_items,
UnsignedBits oob_item,
Int2Type<true> is_full_tile,
Int2Type<_RANK_ALGORITHM> rank_algorithm)
{
BlockLoadKeysT(temp_storage.load_keys).Load(
d_keys_in + block_offset, keys);
CTA_SYNC();
}
/**
* Load a tile of keys (specialized for partial tile, any ranking algorithm)
*/
template <int _RANK_ALGORITHM>
__device__ __forceinline__ void LoadKeys(
UnsignedBits (&keys)[ITEMS_PER_THREAD],
OffsetT block_offset,
OffsetT valid_items,
UnsignedBits oob_item,
Int2Type<false> is_full_tile,
Int2Type<_RANK_ALGORITHM> rank_algorithm)
{
BlockLoadKeysT(temp_storage.load_keys).Load(
d_keys_in + block_offset, keys, valid_items, oob_item);
CTA_SYNC();
}
/**
* Load a tile of keys (specialized for full tile, match ranking algorithm)
*/
__device__ __forceinline__ void LoadKeys(
UnsignedBits (&keys)[ITEMS_PER_THREAD],
OffsetT block_offset,
OffsetT valid_items,
UnsignedBits oob_item,
Int2Type<true> is_full_tile,
Int2Type<RADIX_RANK_MATCH> rank_algorithm)
{
LoadDirectWarpStriped(threadIdx.x, d_keys_in + block_offset, keys);
}
/**
* Load a tile of keys (specialized for partial tile, match ranking algorithm)
*/
__device__ __forceinline__ void LoadKeys(
UnsignedBits (&keys)[ITEMS_PER_THREAD],
OffsetT block_offset,
OffsetT valid_items,
UnsignedBits oob_item,
Int2Type<false> is_full_tile,
Int2Type<RADIX_RANK_MATCH> rank_algorithm)
{
LoadDirectWarpStriped(threadIdx.x, d_keys_in + block_offset, keys, valid_items, oob_item);
}
/**
* Load a tile of values (specialized for full tile, any ranking algorithm)
*/
template <int _RANK_ALGORITHM>
__device__ __forceinline__ void LoadValues(
ValueT (&values)[ITEMS_PER_THREAD],
OffsetT block_offset,
OffsetT valid_items,
Int2Type<true> is_full_tile,
Int2Type<_RANK_ALGORITHM> rank_algorithm)
{
BlockLoadValuesT(temp_storage.load_values).Load(
d_values_in + block_offset, values);
CTA_SYNC();
}
/**
* Load a tile of values (specialized for partial tile, any ranking algorithm)
*/
template <int _RANK_ALGORITHM>
__device__ __forceinline__ void LoadValues(
ValueT (&values)[ITEMS_PER_THREAD],
OffsetT block_offset,
OffsetT valid_items,
Int2Type<false> is_full_tile,
Int2Type<_RANK_ALGORITHM> rank_algorithm)
{
BlockLoadValuesT(temp_storage.load_values).Load(
d_values_in + block_offset, values, valid_items);
CTA_SYNC();
}
/**
* Load a tile of values (specialized for full tile, match ranking algorithm)
*/
__device__ __forceinline__ void LoadValues(
ValueT (&values)[ITEMS_PER_THREAD],
OffsetT block_offset,
volatile OffsetT valid_items,
Int2Type<true> is_full_tile,
Int2Type<RADIX_RANK_MATCH> rank_algorithm)
{
LoadDirectWarpStriped(threadIdx.x, d_values_in + block_offset, values);
}
/**
* Load a tile of values (specialized for partial tile, match ranking algorithm)
*/
__device__ __forceinline__ void LoadValues(
ValueT (&values)[ITEMS_PER_THREAD],
OffsetT block_offset,
volatile OffsetT valid_items,
Int2Type<false> is_full_tile,
Int2Type<RADIX_RANK_MATCH> rank_algorithm)
{
LoadDirectWarpStriped(threadIdx.x, d_values_in + block_offset, values, valid_items);
}
/**
* Truck along associated values
*/
template <bool FULL_TILE>
__device__ __forceinline__ void GatherScatterValues(
OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD],
int (&ranks)[ITEMS_PER_THREAD],
OffsetT block_offset,
OffsetT valid_items,
Int2Type<false> /*is_keys_only*/)
{
CTA_SYNC();
ValueT values[ITEMS_PER_THREAD];
LoadValues(
values,
block_offset,
valid_items,
Int2Type<FULL_TILE>(),
Int2Type<RANK_ALGORITHM>());
ScatterValues<FULL_TILE>(
values,
relative_bin_offsets,
ranks,
valid_items);
}
/**
* Truck along associated values (specialized for key-only sorting)
*/
template <bool FULL_TILE>
__device__ __forceinline__ void GatherScatterValues(
OffsetT (&/*relative_bin_offsets*/)[ITEMS_PER_THREAD],
int (&/*ranks*/)[ITEMS_PER_THREAD],
OffsetT /*block_offset*/,
OffsetT /*valid_items*/,
Int2Type<true> /*is_keys_only*/)
{}
/**
* Process tile
*/
template <bool FULL_TILE>
__device__ __forceinline__ void ProcessTile(
OffsetT block_offset,
const OffsetT &valid_items = TILE_ITEMS)
{
UnsignedBits keys[ITEMS_PER_THREAD];
int ranks[ITEMS_PER_THREAD];
OffsetT relative_bin_offsets[ITEMS_PER_THREAD];
// Assign default (min/max) value to all keys
UnsignedBits default_key = (IS_DESCENDING) ? LOWEST_KEY : MAX_KEY;
// Load tile of keys
LoadKeys(
keys,
block_offset,
valid_items,
default_key,
Int2Type<FULL_TILE>(),
Int2Type<RANK_ALGORITHM>());
// Twiddle key bits if necessary
#pragma unroll
for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
{
keys[KEY] = Traits<KeyT>::TwiddleIn(keys[KEY]);
}
// Rank the twiddled keys
int exclusive_digit_prefix[BINS_TRACKED_PER_THREAD];
BlockRadixRankT(temp_storage.radix_rank).RankKeys(
keys,
ranks,
current_bit,
num_bits,
exclusive_digit_prefix);
CTA_SYNC();
// Share exclusive digit prefix
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
// Store exclusive prefix
temp_storage.exclusive_digit_prefix[bin_idx] =
exclusive_digit_prefix[track];
}
}
CTA_SYNC();
// Get inclusive digit prefix
int inclusive_digit_prefix[BINS_TRACKED_PER_THREAD];
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
if (IS_DESCENDING)
{
// Get inclusive digit prefix from exclusive prefix (higher bins come first)
inclusive_digit_prefix[track] = (bin_idx == 0) ?
(BLOCK_THREADS * ITEMS_PER_THREAD) :
temp_storage.exclusive_digit_prefix[bin_idx - 1];
}
else
{
// Get inclusive digit prefix from exclusive prefix (lower bins come first)
inclusive_digit_prefix[track] = (bin_idx == RADIX_DIGITS - 1) ?
(BLOCK_THREADS * ITEMS_PER_THREAD) :
temp_storage.exclusive_digit_prefix[bin_idx + 1];
}
}
}
CTA_SYNC();
// Update global scatter base offsets for each digit
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
bin_offset[track] -= exclusive_digit_prefix[track];
temp_storage.relative_bin_offsets[bin_idx] = bin_offset[track];
bin_offset[track] += inclusive_digit_prefix[track];
}
}
CTA_SYNC();
// Scatter keys
ScatterKeys<FULL_TILE>(keys, relative_bin_offsets, ranks, valid_items);
// Gather/scatter values
GatherScatterValues<FULL_TILE>(relative_bin_offsets , ranks, block_offset, valid_items, Int2Type<KEYS_ONLY>());
}
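// Per-tile pipeline recap: load keys, bit-twiddle them so every key
// type compares as an unsigned integer, rank by the current digit,
// publish exclusive/inclusive digit prefixes through shared memory,
// convert this block's bin offsets into scatter bases, then scatter
// keys (and, for key-value sorts, values) to digit-partitioned output.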
//---------------------------------------------------------------------
// Copy shortcut
//---------------------------------------------------------------------
/**
* Copy tiles within the range of input
*/
template <
typename InputIteratorT,
typename T>
__device__ __forceinline__ void Copy(
InputIteratorT d_in,
T *d_out,
OffsetT block_offset,
OffsetT block_end)
{
// Simply copy the input
while (block_offset + TILE_ITEMS <= block_end)
{
T items[ITEMS_PER_THREAD];
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in + block_offset, items);
CTA_SYNC();
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_out + block_offset, items);
block_offset += TILE_ITEMS;
}
// Clean up last partial tile with guarded-I/O
if (block_offset < block_end)
{
OffsetT valid_items = block_end - block_offset;
T items[ITEMS_PER_THREAD];
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in + block_offset, items, valid_items);
CTA_SYNC();
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_out + block_offset, items, valid_items);
}
}
/**
* Copy tiles within the range of input (specialized for NullType)
*/
template <typename InputIteratorT>
__device__ __forceinline__ void Copy(
InputIteratorT /*d_in*/,
NullType * /*d_out*/,
OffsetT /*block_offset*/,
OffsetT /*block_end*/)
{}
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ AgentRadixSortDownsweep(
TempStorage &temp_storage,
OffsetT (&bin_offset)[BINS_TRACKED_PER_THREAD],
OffsetT num_items,
const KeyT *d_keys_in,
KeyT *d_keys_out,
const ValueT *d_values_in,
ValueT *d_values_out,
int current_bit,
int num_bits)
:
temp_storage(temp_storage.Alias()),
d_keys_in(reinterpret_cast<const UnsignedBits*>(d_keys_in)),
d_values_in(d_values_in),
d_keys_out(reinterpret_cast<UnsignedBits*>(d_keys_out)),
d_values_out(d_values_out),
current_bit(current_bit),
num_bits(num_bits),
short_circuit(1)
{
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
this->bin_offset[track] = bin_offset[track];
int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
// Short-circuit if the histogram has bin counts of only zeros or problem-size
short_circuit = short_circuit && ((bin_offset[track] == 0) || (bin_offset[track] == num_items));
}
}
short_circuit = CTA_SYNC_AND(short_circuit);
}
/**
* Constructor
*/
__device__ __forceinline__ AgentRadixSortDownsweep(
TempStorage &temp_storage,
OffsetT num_items,
OffsetT *d_spine,
const KeyT *d_keys_in,
KeyT *d_keys_out,
const ValueT *d_values_in,
ValueT *d_values_out,
int current_bit,
int num_bits)
:
temp_storage(temp_storage.Alias()),
d_keys_in(reinterpret_cast<const UnsignedBits*>(d_keys_in)),
d_values_in(d_values_in),
d_keys_out(reinterpret_cast<UnsignedBits*>(d_keys_out)),
d_values_out(d_values_out),
current_bit(current_bit),
num_bits(num_bits),
short_circuit(1)
{
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track;
// Load digit bin offsets (each of the first RADIX_DIGITS threads will load an offset for that digit)
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
if (IS_DESCENDING)
bin_idx = RADIX_DIGITS - bin_idx - 1;
// Short-circuit if the first block's histogram has bin counts of only zeros or problem-size
OffsetT first_block_bin_offset = d_spine[gridDim.x * bin_idx];
short_circuit = short_circuit && ((first_block_bin_offset == 0) || (first_block_bin_offset == num_items));
// Load my block's bin offset for my bin
bin_offset[track] = d_spine[(gridDim.x * bin_idx) + blockIdx.x];
}
}
short_circuit = CTA_SYNC_AND(short_circuit);
}
/**
* Distribute keys from a segment of input tiles.
*/
__device__ __forceinline__ void ProcessRegion(
OffsetT block_offset,
OffsetT block_end)
{
if (short_circuit)
{
// Copy keys
Copy(d_keys_in, d_keys_out, block_offset, block_end);
// Copy values
Copy(d_values_in, d_values_out, block_offset, block_end);
}
else
{
// Process full tiles of tile_items
while (block_offset + TILE_ITEMS <= block_end)
{
ProcessTile<true>(block_offset);
block_offset += TILE_ITEMS;
CTA_SYNC();
}
// Clean up last partial tile with guarded-I/O
if (block_offset < block_end)
{
ProcessTile<false>(block_offset, block_end - block_offset);
}
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
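// Usage sketch (illustrative only): downsweep passes are normally
// driven by the device-level sort rather than invoked directly. A
// minimal host-side call, assuming stock CUB signatures (this fork may
// differ), with all device pointers hypothetical:
//
// void *d_temp = NULL; size_t temp_bytes = 0;
// cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes,
// d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); // sizing
// cudaMalloc(&d_temp, temp_bytes);
// cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes,
// d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); // sort
// cudaFree(d_temp);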
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_scan.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::AgentScan implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan.
*/
#pragma once
#include <iterator>
#include "single_pass_scan_operators.cuh"
#include "../block/block_load.cuh"
#include "../block/block_store.cuh"
#include "../block/block_scan.cuh"
#include "../grid/grid_queue.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Parameterizable tuning policy type for AgentScan
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use
CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements
BlockStoreAlgorithm _STORE_ALGORITHM, ///< The BlockStore algorithm to use
BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use
struct AgentScanPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
};
static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements
static const BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; ///< The BlockStore algorithm to use
static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use
};
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief AgentScan implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan.
*/
template <
typename AgentScanPolicyT, ///< Parameterized AgentScanPolicyT tuning policy type
typename InputIteratorT, ///< Random-access input iterator type
typename OutputIteratorT, ///< Random-access output iterator type
typename ScanOpT, ///< Scan functor type
typename InitValueT, ///< The init_value element for ScanOpT type (cub::NullType for inclusive scan)
typename OffsetT> ///< Signed integer type for global offsets
struct AgentScan
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Tile status descriptor interface type
typedef ScanTileState<OutputT> ScanTileStateT;
// Input iterator wrapper type (for applying cache modifier)
typedef typename If<IsPointer<InputIteratorT>::VALUE,
CacheModifiedInputIterator<AgentScanPolicyT::LOAD_MODIFIER, InputT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator
InputIteratorT>::Type // Directly use the supplied input iterator type
WrappedInputIteratorT;
// Constants
enum
{
IS_INCLUSIVE = Equals<InitValueT, NullType>::VALUE, // Inclusive scan if no init_value type is provided
BLOCK_THREADS = AgentScanPolicyT::BLOCK_THREADS,
ITEMS_PER_THREAD = AgentScanPolicyT::ITEMS_PER_THREAD,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
};
// Parameterized BlockLoad type
typedef BlockLoad<
OutputT,
AgentScanPolicyT::BLOCK_THREADS,
AgentScanPolicyT::ITEMS_PER_THREAD,
AgentScanPolicyT::LOAD_ALGORITHM>
BlockLoadT;
// Parameterized BlockStore type
typedef BlockStore<
OutputT,
AgentScanPolicyT::BLOCK_THREADS,
AgentScanPolicyT::ITEMS_PER_THREAD,
AgentScanPolicyT::STORE_ALGORITHM>
BlockStoreT;
// Parameterized BlockScan type
typedef BlockScan<
OutputT,
AgentScanPolicyT::BLOCK_THREADS,
AgentScanPolicyT::SCAN_ALGORITHM>
BlockScanT;
// Callback type for obtaining tile prefix during block scan
typedef TilePrefixCallbackOp<
OutputT,
ScanOpT,
ScanTileStateT>
TilePrefixCallbackOpT;
// Stateful BlockScan prefix callback type for managing a running total while scanning consecutive tiles
typedef BlockScanRunningPrefixOp<
OutputT,
ScanOpT>
RunningPrefixCallbackOp;
// Shared memory type for this thread block
union _TempStorage
{
typename BlockLoadT::TempStorage load; // Smem needed for tile loading
typename BlockStoreT::TempStorage store; // Smem needed for tile storing
struct
{
typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback
typename BlockScanT::TempStorage scan; // Smem needed for tile scanning
};
};
// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
_TempStorage& temp_storage; ///< Reference to temp_storage
WrappedInputIteratorT d_in; ///< Input data
OutputIteratorT d_out; ///< Output data
ScanOpT scan_op; ///< Binary scan operator
InitValueT init_value; ///< The init_value element for ScanOpT
//---------------------------------------------------------------------
// Block scan utility methods
//---------------------------------------------------------------------
/**
* Exclusive scan specialization (first tile)
*/
__device__ __forceinline__
void ScanTile(
OutputT (&items)[ITEMS_PER_THREAD],
OutputT init_value,
ScanOpT scan_op,
OutputT &block_aggregate,
Int2Type<false> /*is_inclusive*/)
{
BlockScanT(temp_storage.scan).ExclusiveScan(items, items, init_value, scan_op, block_aggregate);
block_aggregate = scan_op(init_value, block_aggregate);
}
/**
* Inclusive scan specialization (first tile)
*/
__device__ __forceinline__
void ScanTile(
OutputT (&items)[ITEMS_PER_THREAD],
InitValueT /*init_value*/,
ScanOpT scan_op,
OutputT &block_aggregate,
Int2Type<true> /*is_inclusive*/)
{
BlockScanT(temp_storage.scan).InclusiveScan(items, items, scan_op, block_aggregate);
}
/**
* Exclusive scan specialization (subsequent tiles)
*/
template <typename PrefixCallback>
__device__ __forceinline__
void ScanTile(
OutputT (&items)[ITEMS_PER_THREAD],
ScanOpT scan_op,
PrefixCallback &prefix_op,
Int2Type<false> /*is_inclusive*/)
{
BlockScanT(temp_storage.scan).ExclusiveScan(items, items, scan_op, prefix_op);
}
/**
* Inclusive scan specialization (subsequent tiles)
*/
template <typename PrefixCallback>
__device__ __forceinline__
void ScanTile(
OutputT (&items)[ITEMS_PER_THREAD],
ScanOpT scan_op,
PrefixCallback &prefix_op,
Int2Type<true> /*is_inclusive*/)
{
BlockScanT(temp_storage.scan).InclusiveScan(items, items, scan_op, prefix_op);
}
//---------------------------------------------------------------------
// Constructor
//---------------------------------------------------------------------
// Constructor
__device__ __forceinline__
AgentScan(
TempStorage& temp_storage, ///< Reference to temp_storage
InputIteratorT d_in, ///< Input data
OutputIteratorT d_out, ///< Output data
ScanOpT scan_op, ///< Binary scan operator
InitValueT init_value) ///< Initial value to seed the exclusive scan
:
temp_storage(temp_storage.Alias()),
d_in(d_in),
d_out(d_out),
scan_op(scan_op),
init_value(init_value)
{}
//---------------------------------------------------------------------
// Cooperatively scan a device-wide sequence of tiles with other CTAs
//---------------------------------------------------------------------
/**
* Process a tile of input (dynamic chained scan)
*/
template <bool IS_LAST_TILE> ///< Whether the current tile is the last tile
__device__ __forceinline__ void ConsumeTile(
OffsetT num_remaining, ///< Number of global input items remaining (including this tile)
int tile_idx, ///< Tile index
OffsetT tile_offset, ///< Tile offset
ScanTileStateT& tile_state) ///< Global tile state descriptor
{
// Load items
OutputT items[ITEMS_PER_THREAD];
if (IS_LAST_TILE)
BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items, num_remaining);
else
BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items);
CTA_SYNC();
// Perform tile scan
if (tile_idx == 0)
{
// Scan first tile
OutputT block_aggregate;
ScanTile(items, init_value, scan_op, block_aggregate, Int2Type<IS_INCLUSIVE>());
if ((!IS_LAST_TILE) && (threadIdx.x == 0))
tile_state.SetInclusive(0, block_aggregate);
}
else
{
// Scan non-first tile
TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, scan_op, tile_idx);
ScanTile(items, scan_op, prefix_op, Int2Type<IS_INCLUSIVE>());
}
CTA_SYNC();
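// At this point each thread's items[] holds scanned results that
// already include the running prefix of all preceding tiles, obtained
// through the decoupled look-back performed by TilePrefixCallbackOpT.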
// Store items
if (IS_LAST_TILE)
BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items, num_remaining);
else
BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items);
}
/**
* Scan tiles of items as part of a dynamic chained scan
*/
__device__ __forceinline__ void ConsumeRange(
int num_items, ///< Total number of input items
ScanTileStateT& tile_state, ///< Global tile state descriptor
int start_tile) ///< The starting tile for the current grid
{
// Blocks are launched in increasing order, so just assign one tile per block
int tile_idx = start_tile + blockIdx.x; // Current tile index
OffsetT tile_offset = OffsetT(TILE_ITEMS) * tile_idx; // Global offset for the current tile
OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile)
if (num_remaining > TILE_ITEMS)
{
// Not last tile
ConsumeTile<false>(num_remaining, tile_idx, tile_offset, tile_state);
}
else if (num_remaining > 0)
{
// Last tile
ConsumeTile<true>(num_remaining, tile_idx, tile_offset, tile_state);
}
}
//---------------------------------------------------------------------
// Scan a sequence of consecutive tiles (independent of other thread blocks)
//---------------------------------------------------------------------
/**
* Process a tile of input
*/
template <
bool IS_FIRST_TILE,
bool IS_LAST_TILE>
__device__ __forceinline__ void ConsumeTile(
OffsetT tile_offset, ///< Tile offset
RunningPrefixCallbackOp& prefix_op, ///< Running prefix operator
int valid_items = TILE_ITEMS) ///< Number of valid items in the tile
{
// Load items
OutputT items[ITEMS_PER_THREAD];
if (IS_LAST_TILE)
BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items, valid_items);
else
BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items);
CTA_SYNC();
// Block scan
if (IS_FIRST_TILE)
{
OutputT block_aggregate;
ScanTile(items, init_value, scan_op, block_aggregate, Int2Type<IS_INCLUSIVE>());
prefix_op.running_total = block_aggregate;
}
else
{
ScanTile(items, scan_op, prefix_op, Int2Type<IS_INCLUSIVE>());
}
CTA_SYNC();
// Store items
if (IS_LAST_TILE)
BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items, valid_items);
else
BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items);
}
/**
* Scan a consecutive share of input tiles
*/
__device__ __forceinline__ void ConsumeRange(
OffsetT range_offset, ///< [in] Threadblock begin offset (inclusive)
OffsetT range_end) ///< [in] Threadblock end offset (exclusive)
{
BlockScanRunningPrefixOp<OutputT, ScanOpT> prefix_op(scan_op);
if (range_offset + TILE_ITEMS <= range_end)
{
// Consume first tile of input (full)
ConsumeTile<true, true>(range_offset, prefix_op);
range_offset += TILE_ITEMS;
// Consume subsequent full tiles of input
while (range_offset + TILE_ITEMS <= range_end)
{
ConsumeTile<false, true>(range_offset, prefix_op);
range_offset += TILE_ITEMS;
}
// Consume a partially-full tile
if (range_offset < range_end)
{
int valid_items = range_end - range_offset;
ConsumeTile<false, false>(range_offset, prefix_op, valid_items);
}
}
else
{
// Consume the first tile of input (partially-full)
int valid_items = range_end - range_offset;
ConsumeTile<true, false>(range_offset, prefix_op, valid_items);
}
}
/**
* Scan a consecutive share of input tiles, seeded with the specified prefix value
*/
__device__ __forceinline__ void ConsumeRange(
OffsetT range_offset, ///< [in] Threadblock begin offset (inclusive)
OffsetT range_end, ///< [in] Threadblock end offset (exclusive)
OutputT prefix) ///< [in] The prefix to apply to the scan segment
{
BlockScanRunningPrefixOp<OutputT, ScanOpT> prefix_op(prefix, scan_op);
// Consume full tiles of input
while (range_offset + TILE_ITEMS <= range_end)
{
ConsumeTile<true, false>(range_offset, prefix_op);
range_offset += TILE_ITEMS;
}
// Consume a partially-full tile
if (range_offset < range_end)
{
int valid_items = range_end - range_offset;
ConsumeTile<false, false>(range_offset, prefix_op, valid_items);
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
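// Usage sketch (illustrative only): AgentScan underpins the
// device-level scan entry points. A minimal host-side call, assuming
// stock CUB signatures and hypothetical device pointers:
//
// void *d_temp = NULL; size_t temp_bytes = 0;
// cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items);
// cudaMalloc(&d_temp, temp_bytes);
// cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items);
// cudaFree(d_temp);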
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_reduce.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::AgentReduce implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduction.
*/
#pragma once
#include <iterator>
#include "../block/block_load.cuh"
#include "../block/block_reduce.cuh"
#include "../grid/grid_mapping.cuh"
#include "../grid/grid_even_share.cuh"
#include "../util_type.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Parameterizable tuning policy type for AgentReduce
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
int _VECTOR_LOAD_LENGTH, ///< Number of items per vectorized load
BlockReduceAlgorithm _BLOCK_ALGORITHM, ///< Cooperative block-wide reduction algorithm to use
CacheLoadModifier _LOAD_MODIFIER> ///< Cache load modifier for reading input elements
struct AgentReducePolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
VECTOR_LOAD_LENGTH = _VECTOR_LOAD_LENGTH, ///< Number of items per vectorized load
};
static const BlockReduceAlgorithm BLOCK_ALGORITHM = _BLOCK_ALGORITHM; ///< Cooperative block-wide reduction algorithm to use
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements
};
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief AgentReduce implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduction.
*
* Each thread reduces only the values it loads. If \p FIRST_TILE, this
* partial reduction is stored into \p thread_aggregate. Otherwise it is
* accumulated into \p thread_aggregate.
*/
template <
typename AgentReducePolicy, ///< Parameterized AgentReducePolicy tuning policy type
typename InputIteratorT, ///< Random-access iterator type for input
typename OutputIteratorT, ///< Random-access iterator type for output
typename OffsetT, ///< Signed integer type for global offsets
typename ReductionOp> ///< Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt>
struct AgentReduce
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
/// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
/// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
/// Vector type of InputT for data movement
typedef typename CubVector<InputT, AgentReducePolicy::VECTOR_LOAD_LENGTH>::Type VectorT;
/// Input iterator wrapper type (for applying cache modifier)
typedef typename If<IsPointer<InputIteratorT>::VALUE,
CacheModifiedInputIterator<AgentReducePolicy::LOAD_MODIFIER, InputT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator
InputIteratorT>::Type // Directly use the supplied input iterator type
WrappedInputIteratorT;
/// Constants
enum
{
BLOCK_THREADS = AgentReducePolicy::BLOCK_THREADS,
ITEMS_PER_THREAD = AgentReducePolicy::ITEMS_PER_THREAD,
VECTOR_LOAD_LENGTH = CUB_MIN(ITEMS_PER_THREAD, AgentReducePolicy::VECTOR_LOAD_LENGTH),
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
// Can vectorize according to the policy if the input iterator is a native pointer to a primitive type
ATTEMPT_VECTORIZATION = (VECTOR_LOAD_LENGTH > 1) &&
(ITEMS_PER_THREAD % VECTOR_LOAD_LENGTH == 0) &&
(IsPointer<InputIteratorT>::VALUE) && Traits<InputT>::PRIMITIVE,
};
static const CacheLoadModifier LOAD_MODIFIER = AgentReducePolicy::LOAD_MODIFIER;
static const BlockReduceAlgorithm BLOCK_ALGORITHM = AgentReducePolicy::BLOCK_ALGORITHM;
/// Parameterized BlockReduce primitive
typedef BlockReduce<OutputT, BLOCK_THREADS, AgentReducePolicy::BLOCK_ALGORITHM> BlockReduceT;
/// Shared memory type required by this thread block
struct _TempStorage
{
typename BlockReduceT::TempStorage reduce;
};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
_TempStorage& temp_storage; ///< Reference to temp_storage
InputIteratorT d_in; ///< Input data to reduce
WrappedInputIteratorT d_wrapped_in; ///< Wrapped input data to reduce
ReductionOp reduction_op; ///< Binary reduction operator
//---------------------------------------------------------------------
// Utility
//---------------------------------------------------------------------
// Whether or not the input is aligned with the vector type (specialized for types we can vectorize)
template <typename Iterator>
static __device__ __forceinline__ bool IsAligned(
Iterator d_in,
Int2Type<true> /*can_vectorize*/)
{
return (size_t(d_in) & (sizeof(VectorT) - 1)) == 0;
}
// Whether or not the input is aligned with the vector type (specialized for types we cannot vectorize)
template <typename Iterator>
static __device__ __forceinline__ bool IsAligned(
Iterator /*d_in*/,
Int2Type<false> /*can_vectorize*/)
{
return false;
}
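// Example: with InputT = float and VECTOR_LOAD_LENGTH = 4, VectorT is
// float4, so the alignment test above requires d_in to be 16-byte
// aligned ((size_t(d_in) & 15) == 0) before vectorized loads are used.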
//---------------------------------------------------------------------
// Constructor
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ AgentReduce(
TempStorage& temp_storage, ///< Reference to temp_storage
InputIteratorT d_in, ///< Input data to reduce
ReductionOp reduction_op) ///< Binary reduction operator
:
temp_storage(temp_storage.Alias()),
d_in(d_in),
d_wrapped_in(d_in),
reduction_op(reduction_op)
{}
//---------------------------------------------------------------------
// Tile consumption
//---------------------------------------------------------------------
/**
* Consume a full tile of input (non-vectorized)
*/
template <int IS_FIRST_TILE>
__device__ __forceinline__ void ConsumeTile(
OutputT &thread_aggregate,
OffsetT block_offset, ///< The offset the tile to consume
int /*valid_items*/, ///< The number of valid items in the tile
Int2Type<true> /*is_full_tile*/, ///< Whether or not this is a full tile
Int2Type<false> /*can_vectorize*/) ///< Whether or not we can vectorize loads
{
OutputT items[ITEMS_PER_THREAD];
// Load items in striped fashion
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_wrapped_in + block_offset, items);
// Reduce items within each thread stripe
thread_aggregate = (IS_FIRST_TILE) ?
internal::ThreadReduce(items, reduction_op) :
internal::ThreadReduce(items, reduction_op, thread_aggregate);
}
/**
* Consume a full tile of input (vectorized)
*/
template <int IS_FIRST_TILE>
__device__ __forceinline__ void ConsumeTile(
OutputT &thread_aggregate,
OffsetT block_offset, ///< The offset the tile to consume
int /*valid_items*/, ///< The number of valid items in the tile
Int2Type<true> /*is_full_tile*/, ///< Whether or not this is a full tile
Int2Type<true> /*can_vectorize*/) ///< Whether or not we can vectorize loads
{
// Alias items as an array of VectorT and load it in striped fashion
enum { WORDS = ITEMS_PER_THREAD / VECTOR_LOAD_LENGTH };
// Fabricate a vectorized input iterator
InputT *d_in_unqualified = const_cast<InputT*>(d_in) + block_offset + (threadIdx.x * VECTOR_LOAD_LENGTH);
CacheModifiedInputIterator<AgentReducePolicy::LOAD_MODIFIER, VectorT, OffsetT> d_vec_in(
reinterpret_cast<VectorT*>(d_in_unqualified));
// Load items as vector items
InputT input_items[ITEMS_PER_THREAD];
VectorT *vec_items = reinterpret_cast<VectorT*>(input_items);
#pragma unroll
for (int i = 0; i < WORDS; ++i)
vec_items[i] = d_vec_in[BLOCK_THREADS * i];
// Convert from input type to output type
OutputT items[ITEMS_PER_THREAD];
#pragma unroll
for (int i = 0; i < ITEMS_PER_THREAD; ++i)
items[i] = input_items[i];
// Reduce items within each thread stripe
thread_aggregate = (IS_FIRST_TILE) ?
internal::ThreadReduce(items, reduction_op) :
internal::ThreadReduce(items, reduction_op, thread_aggregate);
}
/**
* Consume a partial tile of input
*/
template <int IS_FIRST_TILE, int CAN_VECTORIZE>
__device__ __forceinline__ void ConsumeTile(
OutputT &thread_aggregate,
OffsetT block_offset, ///< The offset the tile to consume
int valid_items, ///< The number of valid items in the tile
Int2Type<false> /*is_full_tile*/, ///< Whether or not this is a full tile
Int2Type<CAN_VECTORIZE> /*can_vectorize*/) ///< Whether or not we can vectorize loads
{
// Partial tile
int thread_offset = threadIdx.x;
// Read first item
if ((IS_FIRST_TILE) && (thread_offset < valid_items))
{
thread_aggregate = d_wrapped_in[block_offset + thread_offset];
thread_offset += BLOCK_THREADS;
}
// Continue reading items (block-striped)
while (thread_offset < valid_items)
{
OutputT item = d_wrapped_in[block_offset + thread_offset];
thread_aggregate = reduction_op(thread_aggregate, item);
thread_offset += BLOCK_THREADS;
}
}
//---------------------------------------------------------------------
// Consume a contiguous segment of tiles
//---------------------------------------------------------------------
/**
* \brief Reduce a contiguous segment of input tiles
*/
template <int CAN_VECTORIZE>
__device__ __forceinline__ OutputT ConsumeRange(
GridEvenShare<OffsetT> &even_share, ///< GridEvenShare descriptor
Int2Type<CAN_VECTORIZE> can_vectorize) ///< Whether or not we can vectorize loads
{
OutputT thread_aggregate;
if (even_share.block_offset + TILE_ITEMS > even_share.block_end)
{
// First tile isn't full (not all threads have valid items)
int valid_items = even_share.block_end - even_share.block_offset;
ConsumeTile<true>(thread_aggregate, even_share.block_offset, valid_items, Int2Type<false>(), can_vectorize);
return BlockReduceT(temp_storage.reduce).Reduce(thread_aggregate, reduction_op, valid_items);
}
// At least one full block
ConsumeTile<true>(thread_aggregate, even_share.block_offset, TILE_ITEMS, Int2Type<true>(), can_vectorize);
even_share.block_offset += even_share.block_stride;
// Consume subsequent full tiles of input
while (even_share.block_offset + TILE_ITEMS <= even_share.block_end)
{
ConsumeTile<false>(thread_aggregate, even_share.block_offset, TILE_ITEMS, Int2Type<true>(), can_vectorize);
even_share.block_offset += even_share.block_stride;
}
// Consume a partially-full tile
if (even_share.block_offset < even_share.block_end)
{
int valid_items = even_share.block_end - even_share.block_offset;
ConsumeTile<false>(thread_aggregate, even_share.block_offset, valid_items, Int2Type<false>(), can_vectorize);
}
// Compute block-wide reduction (all threads have valid items)
return BlockReduceT(temp_storage.reduce).Reduce(thread_aggregate, reduction_op);
}
/**
* \brief Reduce a contiguous segment of input tiles
*/
__device__ __forceinline__ OutputT ConsumeRange(
OffsetT block_offset, ///< [in] Threadblock begin offset (inclusive)
OffsetT block_end) ///< [in] Threadblock end offset (exclusive)
{
GridEvenShare<OffsetT> even_share;
even_share.template BlockInit<TILE_ITEMS>(block_offset, block_end);
return (IsAligned(d_in + block_offset, Int2Type<ATTEMPT_VECTORIZATION>())) ?
ConsumeRange(even_share, Int2Type<true && ATTEMPT_VECTORIZATION>()) :
ConsumeRange(even_share, Int2Type<false && ATTEMPT_VECTORIZATION>());
}
/**
* Reduce a contiguous segment of input tiles
*/
__device__ __forceinline__ OutputT ConsumeTiles(
GridEvenShare<OffsetT> &even_share) ///< [in] GridEvenShare descriptor
{
// Initialize GRID_MAPPING_STRIP_MINE even-share descriptor for this thread block
even_share.template BlockInit<TILE_ITEMS, GRID_MAPPING_STRIP_MINE>();
return (IsAligned(d_in, Int2Type<ATTEMPT_VECTORIZATION>())) ?
ConsumeRange(even_share, Int2Type<true && ATTEMPT_VECTORIZATION>()) :
ConsumeRange(even_share, Int2Type<false && ATTEMPT_VECTORIZATION>());
}
};
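// Usage sketch (illustrative only, not part of the original source; AgentReduceT,
// even_share, and d_block_aggregates are placeholder names): a reduction kernel
// typically instantiates this agent over __shared__ TempStorage and has each
// thread block call ConsumeTiles() with an even-share descriptor (constructor
// arguments elided here):
//
// __shared__ typename AgentReduceT::TempStorage temp_storage;
// OutputT block_aggregate =
// AgentReduceT(temp_storage, /* ... */).ConsumeTiles(even_share);
// if (threadIdx.x == 0)
// d_block_aggregates[blockIdx.x] = block_aggregate; // hypothetical output array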
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_histogram.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::AgentHistogram implements a stateful abstraction of CUDA thread blocks for participating in device-wide histogram.
*/
#pragma once
#include <iterator>
#include "../util_type.cuh"
#include "../block/block_load.cuh"
#include "../grid/grid_queue.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy
******************************************************************************/
/**
* Memory preference for privatized histogram bin counters
*/
enum BlockHistogramMemoryPreference
{
GMEM,
SMEM,
BLEND
};
/**
* Parameterizable tuning policy type for AgentHistogram
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _PIXELS_PER_THREAD, ///< Pixels per thread (per tile of input)
BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use
CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements
bool _RLE_COMPRESS, ///< Whether to perform localized RLE to compress samples before histogramming
BlockHistogramMemoryPreference _MEM_PREFERENCE, ///< Whether to prefer privatized shared-memory bins (versus privatized global-memory bins)
bool _WORK_STEALING> ///< Whether to dequeue tiles from a global work queue
struct AgentHistogramPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
PIXELS_PER_THREAD = _PIXELS_PER_THREAD, ///< Pixels per thread (per tile of input)
IS_RLE_COMPRESS = _RLE_COMPRESS, ///< Whether to perform localized RLE to compress samples before histogramming
MEM_PREFERENCE = _MEM_PREFERENCE, ///< Whether to prefer privatized shared-memory bins (versus privatized global-memory bins)
IS_WORK_STEALING = _WORK_STEALING, ///< Whether to dequeue tiles from a global work queue
};
static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements
};
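// Example instantiation (an illustrative assumption, not one of the library's
// tuned presets): 256 threads per block, 8 pixels per thread, RLE compression
// enabled, and privatized bins kept in shared memory.
typedef AgentHistogramPolicy<256, 8, BLOCK_LOAD_DIRECT, LOAD_LDG, true, SMEM, false>
ExampleAgentHistogramPolicy;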
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief AgentHistogram implements a stateful abstraction of CUDA thread blocks for participating in device-wide histogram.
*/
template <
typename AgentHistogramPolicyT, ///< Parameterized AgentHistogramPolicy tuning policy type
int PRIVATIZED_SMEM_BINS, ///< Number of privatized shared-memory histogram bins of any channel. Zero indicates privatized counters to be maintained in device-accessible memory.
int NUM_CHANNELS, ///< Number of channels interleaved in the input data. Supports up to four channels.
int NUM_ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename SampleIteratorT, ///< Random-access input iterator type for reading samples
typename CounterT, ///< Integer type for counting sample occurrences per histogram bin
typename PrivatizedDecodeOpT, ///< The transform operator type for determining privatized counter indices from samples, one for each channel
typename OutputDecodeOpT, ///< The transform operator type for determining output bin-ids from privatized counter indices, one for each channel
typename OffsetT, ///< Signed integer type for global offsets
int PTX_ARCH = CUB_PTX_ARCH> ///< PTX compute capability
struct AgentHistogram
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
/// The sample type of the input iterator
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
/// The pixel type of SampleT
typedef typename CubVector<SampleT, NUM_CHANNELS>::Type PixelT;
/// The quad type of SampleT
typedef typename CubVector<SampleT, 4>::Type QuadT;
/// Constants
enum
{
BLOCK_THREADS = AgentHistogramPolicyT::BLOCK_THREADS,
PIXELS_PER_THREAD = AgentHistogramPolicyT::PIXELS_PER_THREAD,
SAMPLES_PER_THREAD = PIXELS_PER_THREAD * NUM_CHANNELS,
QUADS_PER_THREAD = SAMPLES_PER_THREAD / 4,
TILE_PIXELS = PIXELS_PER_THREAD * BLOCK_THREADS,
TILE_SAMPLES = SAMPLES_PER_THREAD * BLOCK_THREADS,
IS_RLE_COMPRESS = AgentHistogramPolicyT::IS_RLE_COMPRESS,
MEM_PREFERENCE = (PRIVATIZED_SMEM_BINS > 0) ?
AgentHistogramPolicyT::MEM_PREFERENCE :
GMEM,
IS_WORK_STEALING = AgentHistogramPolicyT::IS_WORK_STEALING,
};
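// Worked example (illustrative): with BLOCK_THREADS = 256, PIXELS_PER_THREAD = 8,
// and NUM_CHANNELS = 4 (e.g., RGBA), each thread handles SAMPLES_PER_THREAD = 32
// samples (QUADS_PER_THREAD = 8), and each tile spans TILE_PIXELS = 2048 pixels,
// i.e. TILE_SAMPLES = 8192 samples.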
/// Cache load modifier for reading input elements
static const CacheLoadModifier LOAD_MODIFIER = AgentHistogramPolicyT::LOAD_MODIFIER;
/// Input iterator wrapper type (for applying cache modifier)
typedef typename If<IsPointer<SampleIteratorT>::VALUE,
CacheModifiedInputIterator<LOAD_MODIFIER, SampleT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator
SampleIteratorT>::Type // Directly use the supplied input iterator type
WrappedSampleIteratorT;
/// Pixel input iterator type (for applying cache modifier)
typedef CacheModifiedInputIterator<LOAD_MODIFIER, PixelT, OffsetT>
WrappedPixelIteratorT;
/// Quad input iterator type (for applying cache modifier)
typedef CacheModifiedInputIterator<LOAD_MODIFIER, QuadT, OffsetT>
WrappedQuadIteratorT;
/// Parameterized BlockLoad type for samples
typedef BlockLoad<
SampleT,
BLOCK_THREADS,
SAMPLES_PER_THREAD,
AgentHistogramPolicyT::LOAD_ALGORITHM>
BlockLoadSampleT;
/// Parameterized BlockLoad type for pixels
typedef BlockLoad<
PixelT,
BLOCK_THREADS,
PIXELS_PER_THREAD,
AgentHistogramPolicyT::LOAD_ALGORITHM>
BlockLoadPixelT;
/// Parameterized BlockLoad type for quads
typedef BlockLoad<
QuadT,
BLOCK_THREADS,
QUADS_PER_THREAD,
AgentHistogramPolicyT::LOAD_ALGORITHM>
BlockLoadQuadT;
/// Shared memory type required by this thread block
struct _TempStorage
{
CounterT histograms[NUM_ACTIVE_CHANNELS][PRIVATIZED_SMEM_BINS + 1]; // Smem needed for block-privatized smem histogram (with 1 word of padding)
int tile_idx;
// Aliasable storage layout
union Aliasable
{
typename BlockLoadSampleT::TempStorage sample_load; // Smem needed for loading a tile of samples
typename BlockLoadPixelT::TempStorage pixel_load; // Smem needed for loading a tile of pixels
typename BlockLoadQuadT::TempStorage quad_load; // Smem needed for loading a tile of quads
} aliasable;
};
/// Temporary storage type (unionable)
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
/// Reference to temp_storage
_TempStorage &temp_storage;
/// Sample input iterator (with cache modifier applied, if possible)
WrappedSampleIteratorT d_wrapped_samples;
/// Native pointer for input samples (possibly NULL if unavailable)
SampleT* d_native_samples;
/// The number of output bins for each channel
int (&num_output_bins)[NUM_ACTIVE_CHANNELS];
/// The number of privatized bins for each channel
int (&num_privatized_bins)[NUM_ACTIVE_CHANNELS];
/// Reference to gmem privatized histograms for each channel
CounterT* d_privatized_histograms[NUM_ACTIVE_CHANNELS];
/// Reference to final output histograms (gmem)
CounterT* (&d_output_histograms)[NUM_ACTIVE_CHANNELS];
/// The transform operator for determining output bin-ids from privatized counter indices, one for each channel
OutputDecodeOpT (&output_decode_op)[NUM_ACTIVE_CHANNELS];
/// The transform operator for determining privatized counter indices from samples, one for each channel
PrivatizedDecodeOpT (&privatized_decode_op)[NUM_ACTIVE_CHANNELS];
/// Whether to prefer privatized smem counters vs privatized global counters
bool prefer_smem;
//---------------------------------------------------------------------
// Initialize privatized bin counters
//---------------------------------------------------------------------
// Initialize privatized bin counters
__device__ __forceinline__ void InitBinCounters(CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS])
{
// Initialize histogram bin counts to zeros
#pragma unroll
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
{
for (int privatized_bin = threadIdx.x; privatized_bin < num_privatized_bins[CHANNEL]; privatized_bin += BLOCK_THREADS)
{
privatized_histograms[CHANNEL][privatized_bin] = 0;
}
}
// Barrier to make sure all threads are done updating counters
CTA_SYNC();
}
// Initialize privatized bin counters. Specialized for privatized shared-memory counters
__device__ __forceinline__ void InitSmemBinCounters()
{
CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS];
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL];
InitBinCounters(privatized_histograms);
}
// Initialize privatized bin counters. Specialized for privatized global-memory counters
__device__ __forceinline__ void InitGmemBinCounters()
{
InitBinCounters(d_privatized_histograms);
}
//---------------------------------------------------------------------
// Update final output histograms
//---------------------------------------------------------------------
// Update final output histograms from privatized histograms
__device__ __forceinline__ void StoreOutput(CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS])
{
// Barrier to make sure all threads are done updating counters
CTA_SYNC();
// Apply privatized bin counts to output bin counts
#pragma unroll
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
{
int channel_bins = num_privatized_bins[CHANNEL];
for (int privatized_bin = threadIdx.x;
privatized_bin < channel_bins;
privatized_bin += BLOCK_THREADS)
{
int output_bin = -1;
CounterT count = privatized_histograms[CHANNEL][privatized_bin];
bool is_valid = count > 0;
output_decode_op[CHANNEL].template BinSelect<LOAD_MODIFIER>((SampleT) privatized_bin, output_bin, is_valid);
if (output_bin >= 0)
{
atomicAdd(&d_output_histograms[CHANNEL][output_bin], count);
}
}
}
}
// Update final output histograms from privatized histograms. Specialized for privatized shared-memory counters
__device__ __forceinline__ void StoreSmemOutput()
{
CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS];
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL];
StoreOutput(privatized_histograms);
}
// Update final output histograms from privatized histograms. Specialized for privatized global-memory counters
__device__ __forceinline__ void StoreGmemOutput()
{
StoreOutput(d_privatized_histograms);
}
//---------------------------------------------------------------------
// Tile accumulation
//---------------------------------------------------------------------
// Accumulate pixels. Specialized for RLE compression.
__device__ __forceinline__ void AccumulatePixels(
SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS],
bool is_valid[PIXELS_PER_THREAD],
CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS],
Int2Type<true> is_rle_compress)
{
#pragma unroll
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
{
// Bin pixels
int bins[PIXELS_PER_THREAD];
#pragma unroll
for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL)
{
bins[PIXEL] = -1;
privatized_decode_op[CHANNEL].template BinSelect<LOAD_MODIFIER>(samples[PIXEL][CHANNEL], bins[PIXEL], is_valid[PIXEL]);
}
CounterT accumulator = 1;
#pragma unroll
for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD - 1; ++PIXEL)
{
if (bins[PIXEL] != bins[PIXEL + 1])
{
if (bins[PIXEL] >= 0)
atomicAdd(privatized_histograms[CHANNEL] + bins[PIXEL], accumulator);
accumulator = 0;
}
accumulator++;
}
// Last pixel
if (bins[PIXELS_PER_THREAD - 1] >= 0)
atomicAdd(privatized_histograms[CHANNEL] + bins[PIXELS_PER_THREAD - 1], accumulator);
}
}
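// Worked example (illustrative): for a thread whose per-channel bins are
// {5, 5, 5, 9} with PIXELS_PER_THREAD = 4, the loop above issues
// atomicAdd(bin 5, 3) when the run breaks, and the trailing flush issues
// atomicAdd(bin 9, 1), i.e. two atomics instead of four.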
// Accumulate pixels. Specialized for individual accumulation of each pixel.
__device__ __forceinline__ void AccumulatePixels(
SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS],
bool is_valid[PIXELS_PER_THREAD],
CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS],
Int2Type<false> is_rle_compress)
{
#pragma unroll
for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL)
{
#pragma unroll
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
{
int bin = -1;
privatized_decode_op[CHANNEL].template BinSelect<LOAD_MODIFIER>(samples[PIXEL][CHANNEL], bin, is_valid[PIXEL]);
if (bin >= 0)
atomicAdd(privatized_histograms[CHANNEL] + bin, 1);
}
}
}
/**
* Accumulate pixel, specialized for smem privatized histogram
*/
__device__ __forceinline__ void AccumulateSmemPixels(
SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS],
bool is_valid[PIXELS_PER_THREAD])
{
CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS];
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL];
AccumulatePixels(samples, is_valid, privatized_histograms, Int2Type<IS_RLE_COMPRESS>());
}
/**
* Accumulate pixel, specialized for gmem privatized histogram
*/
__device__ __forceinline__ void AccumulateGmemPixels(
SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS],
bool is_valid[PIXELS_PER_THREAD])
{
AccumulatePixels(samples, is_valid, d_privatized_histograms, Int2Type<IS_RLE_COMPRESS>());
}
//---------------------------------------------------------------------
// Tile loading
//---------------------------------------------------------------------
// Load full, aligned tile using pixel iterator (multi-channel)
template <int _NUM_ACTIVE_CHANNELS>
__device__ __forceinline__ void LoadFullAlignedTile(
OffsetT block_offset,
int valid_samples,
SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS],
Int2Type<_NUM_ACTIVE_CHANNELS> num_active_channels)
{
typedef PixelT AliasedPixels[PIXELS_PER_THREAD];
WrappedPixelIteratorT d_wrapped_pixels((PixelT*) (d_native_samples + block_offset));
// Load using a wrapped pixel iterator
BlockLoadPixelT(temp_storage.aliasable.pixel_load).Load(
d_wrapped_pixels,
reinterpret_cast<AliasedPixels&>(samples));
}
// Load full, aligned tile using quad iterator (single-channel)
__device__ __forceinline__ void LoadFullAlignedTile(
OffsetT block_offset,
int valid_samples,
SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS],
Int2Type<1> num_active_channels)
{
typedef QuadT AliasedQuads[QUADS_PER_THREAD];
WrappedQuadIteratorT d_wrapped_quads((QuadT*) (d_native_samples + block_offset));
// Load using a wrapped quad iterator
BlockLoadQuadT(temp_storage.aliasable.quad_load).Load(
d_wrapped_quads,
reinterpret_cast<AliasedQuads&>(samples));
}
// Load full, aligned tile
__device__ __forceinline__ void LoadTile(
OffsetT block_offset,
int valid_samples,
SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS],
Int2Type<true> is_full_tile,
Int2Type<true> is_aligned)
{
LoadFullAlignedTile(block_offset, valid_samples, samples, Int2Type<NUM_ACTIVE_CHANNELS>());
}
// Load full, mis-aligned tile using sample iterator
__device__ __forceinline__ void LoadTile(
OffsetT block_offset,
int valid_samples,
SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS],
Int2Type<true> is_full_tile,
Int2Type<false> is_aligned)
{
typedef SampleT AliasedSamples[SAMPLES_PER_THREAD];
// Load using sample iterator
BlockLoadSampleT(temp_storage.aliasable.sample_load).Load(
d_wrapped_samples + block_offset,
reinterpret_cast<AliasedSamples&>(samples));
}
// Load partially-full, aligned tile using the pixel iterator
__device__ __forceinline__ void LoadTile(
OffsetT block_offset,
int valid_samples,
SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS],
Int2Type<false> is_full_tile,
Int2Type<true> is_aligned)
{
typedef PixelT AliasedPixels[PIXELS_PER_THREAD];
WrappedPixelIteratorT d_wrapped_pixels((PixelT*) (d_native_samples + block_offset));
int valid_pixels = valid_samples / NUM_CHANNELS;
// Load using a wrapped pixel iterator
BlockLoadPixelT(temp_storage.aliasable.pixel_load).Load(
d_wrapped_pixels,
reinterpret_cast<AliasedPixels&>(samples),
valid_pixels);
}
// Load partially-full, mis-aligned tile using sample iterator
__device__ __forceinline__ void LoadTile(
OffsetT block_offset,
int valid_samples,
SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS],
Int2Type<false> is_full_tile,
Int2Type<false> is_aligned)
{
typedef SampleT AliasedSamples[SAMPLES_PER_THREAD];
BlockLoadSampleT(temp_storage.aliasable.sample_load).Load(
d_wrapped_samples + block_offset,
reinterpret_cast<AliasedSamples&>(samples),
valid_samples);
}
//---------------------------------------------------------------------
// Tile processing
//---------------------------------------------------------------------
// Consume a tile of data samples
template <
bool IS_ALIGNED, // Whether the tile offset is aligned (quad-aligned for single-channel, pixel-aligned for multi-channel)
bool IS_FULL_TILE> // Whether the tile is full
__device__ __forceinline__ void ConsumeTile(OffsetT block_offset, int valid_samples)
{
SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS];
bool is_valid[PIXELS_PER_THREAD];
// Load tile
LoadTile(
block_offset,
valid_samples,
samples,
Int2Type<IS_FULL_TILE>(),
Int2Type<IS_ALIGNED>());
// Set valid flags
#pragma unroll
for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL)
is_valid[PIXEL] = IS_FULL_TILE || (((threadIdx.x * PIXELS_PER_THREAD + PIXEL) * NUM_CHANNELS) < valid_samples);
// Accumulate samples
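// (Shared-memory atomics require compute capability 1.2 or higher, hence
// the architecture guard below; older targets always fall back to
// global-memory privatized histograms.)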
#if CUB_PTX_ARCH >= 120
if (prefer_smem)
AccumulateSmemPixels(samples, is_valid);
else
AccumulateGmemPixels(samples, is_valid);
#else
AccumulateGmemPixels(samples, is_valid);
#endif
}
// Consume row tiles. Specialized for work-stealing from queue
template <bool IS_ALIGNED>
__device__ __forceinline__ void ConsumeTiles(
OffsetT num_row_pixels, ///< The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< The number of rows in the region of interest
OffsetT row_stride_samples, ///< The number of samples between starts of consecutive rows in the region of interest
int tiles_per_row, ///< Number of image tiles per row
GridQueue<int> tile_queue,
Int2Type<true> is_work_stealing)
{
int num_tiles = num_rows * tiles_per_row;
int tile_idx = (blockIdx.y * gridDim.x) + blockIdx.x;
OffsetT num_even_share_tiles = gridDim.x * gridDim.y;
while (tile_idx < num_tiles)
{
int row = tile_idx / tiles_per_row;
int col = tile_idx - (row * tiles_per_row);
OffsetT row_offset = row * row_stride_samples;
OffsetT col_offset = (col * TILE_SAMPLES);
OffsetT tile_offset = row_offset + col_offset;
if (col == tiles_per_row - 1)
{
// Consume a partially-full tile at the end of the row
OffsetT num_remaining = (num_row_pixels * NUM_CHANNELS) - col_offset;
ConsumeTile<IS_ALIGNED, false>(tile_offset, num_remaining);
}
else
{
// Consume full tile
ConsumeTile<IS_ALIGNED, true>(tile_offset, TILE_SAMPLES);
}
CTA_SYNC();
// Get next tile
if (threadIdx.x == 0)
temp_storage.tile_idx = tile_queue.Drain(1) + num_even_share_tiles;
CTA_SYNC();
tile_idx = temp_storage.tile_idx;
}
}
// Consume row tiles. Specialized for even-share (striped across thread blocks)
template <bool IS_ALIGNED>
__device__ __forceinline__ void ConsumeTiles(
OffsetT num_row_pixels, ///< The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< The number of rows in the region of interest
OffsetT row_stride_samples, ///< The number of samples between starts of consecutive rows in the region of interest
int tiles_per_row, ///< Number of image tiles per row
GridQueue<int> tile_queue,
Int2Type<false> is_work_stealing)
{
for (int row = blockIdx.y; row < num_rows; row += gridDim.y)
{
OffsetT row_begin = row * row_stride_samples;
OffsetT row_end = row_begin + (num_row_pixels * NUM_CHANNELS);
OffsetT tile_offset = row_begin + (blockIdx.x * TILE_SAMPLES);
while (tile_offset < row_end)
{
OffsetT num_remaining = row_end - tile_offset;
if (num_remaining < TILE_SAMPLES)
{
// Consume partial tile
ConsumeTile<IS_ALIGNED, false>(tile_offset, num_remaining);
break;
}
// Consume full tile
ConsumeTile<IS_ALIGNED, true>(tile_offset, TILE_SAMPLES);
tile_offset += gridDim.x * TILE_SAMPLES;
}
}
}
//---------------------------------------------------------------------
// Parameter extraction
//---------------------------------------------------------------------
// Return a native pixel pointer (specialized for CacheModifiedInputIterator types)
template <
CacheLoadModifier _MODIFIER,
typename _ValueT,
typename _OffsetT>
__device__ __forceinline__ SampleT* NativePointer(CacheModifiedInputIterator<_MODIFIER, _ValueT, _OffsetT> itr)
{
return itr.ptr;
}
// Return a native pixel pointer (specialized for other types)
template <typename IteratorT>
__device__ __forceinline__ SampleT* NativePointer(IteratorT itr)
{
return NULL;
}
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ AgentHistogram(
TempStorage &temp_storage, ///< Reference to temp_storage
SampleIteratorT d_samples, ///< Input data to reduce
int (&num_output_bins)[NUM_ACTIVE_CHANNELS], ///< The number of bins per final output histogram
int (&num_privatized_bins)[NUM_ACTIVE_CHANNELS], ///< The number of bins per privatized histogram
CounterT* (&d_output_histograms)[NUM_ACTIVE_CHANNELS], ///< Reference to final output histograms
CounterT* (&d_privatized_histograms)[NUM_ACTIVE_CHANNELS], ///< Reference to privatized histograms
OutputDecodeOpT (&output_decode_op)[NUM_ACTIVE_CHANNELS], ///< The transform operator for determining output bin-ids from privatized counter indices, one for each channel
PrivatizedDecodeOpT (&privatized_decode_op)[NUM_ACTIVE_CHANNELS]) ///< The transform operator for determining privatized counter indices from samples, one for each channel
:
temp_storage(temp_storage.Alias()),
d_wrapped_samples(d_samples),
num_output_bins(num_output_bins),
num_privatized_bins(num_privatized_bins),
d_output_histograms(d_output_histograms),
privatized_decode_op(privatized_decode_op),
output_decode_op(output_decode_op),
d_native_samples(NativePointer(d_wrapped_samples)),
prefer_smem((MEM_PREFERENCE == SMEM) ?
true : // prefer smem privatized histograms
(MEM_PREFERENCE == GMEM) ?
false : // prefer gmem privatized histograms
blockIdx.x & 1) // prefer blended privatized histograms
{
int blockId = (blockIdx.y * gridDim.x) + blockIdx.x;
// Initialize the locations of this block's privatized histograms
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
this->d_privatized_histograms[CHANNEL] = d_privatized_histograms[CHANNEL] + (blockId * num_privatized_bins[CHANNEL]);
}
/**
* Consume image
*/
__device__ __forceinline__ void ConsumeTiles(
OffsetT num_row_pixels, ///< The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< The number of rows in the region of interest
OffsetT row_stride_samples, ///< The number of samples between starts of consecutive rows in the region of interest
int tiles_per_row, ///< Number of image tiles per row
GridQueue<int> tile_queue) ///< Queue descriptor for assigning tiles of work to thread blocks
{
// Check whether all row starting offsets are quad-aligned (in single-channel) or pixel-aligned (in multi-channel)
int quad_mask = AlignBytes<QuadT>::ALIGN_BYTES - 1;
int pixel_mask = AlignBytes<PixelT>::ALIGN_BYTES - 1;
size_t row_bytes = sizeof(SampleT) * row_stride_samples;
bool quad_aligned_rows = (NUM_CHANNELS == 1) && (SAMPLES_PER_THREAD % 4 == 0) && // Single channel
((size_t(d_native_samples) & quad_mask) == 0) && // ptr is quad-aligned
((num_rows == 1) || ((row_bytes & quad_mask) == 0)); // row stride in bytes is quad-aligned (or there is only a single row)
bool pixel_aligned_rows = (NUM_CHANNELS > 1) && // Multi channel
((size_t(d_native_samples) & pixel_mask) == 0) && // ptr is pixel-aligned
((row_bytes & pixel_mask) == 0); // row stride in bytes is pixel-aligned
// Whether rows are aligned and can be vectorized
if ((d_native_samples != NULL) && (quad_aligned_rows || pixel_aligned_rows))
ConsumeTiles<true>(num_row_pixels, num_rows, row_stride_samples, tiles_per_row, tile_queue, Int2Type<IS_WORK_STEALING>());
else
ConsumeTiles<false>(num_row_pixels, num_rows, row_stride_samples, tiles_per_row, tile_queue, Int2Type<IS_WORK_STEALING>());
}
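// Worked example (illustrative): for single-channel float samples, QuadT is
// float4, so quad_mask = 15 and vectorized loads require both the sample
// pointer and the row stride (in bytes) to be 16-byte aligned.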
/**
* Initialize privatized bin counters. Specialized for privatized shared-memory counters
*/
__device__ __forceinline__ void InitBinCounters()
{
if (prefer_smem)
InitSmemBinCounters();
else
InitGmemBinCounters();
}
/**
* Store privatized histogram to device-accessible memory. Specialized for privatized shared-memory counters
*/
__device__ __forceinline__ void StoreOutput()
{
if (prefer_smem)
StoreSmemOutput();
else
StoreGmemOutput();
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_select_if.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::AgentSelectIf implements a stateful abstraction of CUDA thread blocks for participating in device-wide select.
*/
#pragma once
#include <iterator>
#include "single_pass_scan_operators.cuh"
#include "../block/block_load.cuh"
#include "../block/block_store.cuh"
#include "../block/block_scan.cuh"
#include "../block/block_exchange.cuh"
#include "../block/block_discontinuity.cuh"
#include "../grid/grid_queue.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Parameterizable tuning policy type for AgentSelectIf
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use
CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements
BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use
struct AgentSelectIfPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
};
static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements
static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use
};
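// Example instantiation (an illustrative assumption, not a tuned preset):
// 128 threads per block, 10 items per thread, warp-scan-based block scan.
typedef AgentSelectIfPolicy<128, 10, BLOCK_LOAD_DIRECT, LOAD_LDG, BLOCK_SCAN_WARP_SCANS>
ExampleAgentSelectIfPolicy;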
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief AgentSelectIf implements a stateful abstraction of CUDA thread blocks for participating in device-wide selection
*
* Performs functor-based selection if SelectOpT functor type != NullType
* Otherwise performs flag-based selection if FlagsInputIteratorT's value type != NullType
* Otherwise performs discontinuity selection (keep unique)
*/
template <
typename AgentSelectIfPolicyT, ///< Parameterized AgentSelectIfPolicy tuning policy type
typename InputIteratorT, ///< Random-access input iterator type for selection items
typename FlagsInputIteratorT, ///< Random-access input iterator type for selection flags (NullType* if a selection functor or discontinuity flagging is to be used for selection)
typename SelectedOutputIteratorT, ///< Random-access output iterator type for selected items
typename SelectOpT, ///< Selection operator type (NullType if selection flags or discontinuity flagging is to be used for selection)
typename EqualityOpT, ///< Equality operator type (NullType if a selection functor or selection flags are to be used for selection)
typename OffsetT, ///< Signed integer type for global offsets
bool KEEP_REJECTS> ///< Whether or not we push rejected items to the back of the output
struct AgentSelectIf
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<SelectedOutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<SelectedOutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// The flag value type
typedef typename std::iterator_traits<FlagsInputIteratorT>::value_type FlagT;
// Tile status descriptor interface type
typedef ScanTileState<OffsetT> ScanTileStateT;
// Constants
enum
{
USE_SELECT_OP,
USE_SELECT_FLAGS,
USE_DISCONTINUITY,
BLOCK_THREADS = AgentSelectIfPolicyT::BLOCK_THREADS,
ITEMS_PER_THREAD = AgentSelectIfPolicyT::ITEMS_PER_THREAD,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
TWO_PHASE_SCATTER = (ITEMS_PER_THREAD > 1),
SELECT_METHOD = (!Equals<SelectOpT, NullType>::VALUE) ?
USE_SELECT_OP :
(!Equals<FlagT, NullType>::VALUE) ?
USE_SELECT_FLAGS :
USE_DISCONTINUITY
};
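// For example (illustrative): cub::DeviceSelect::If supplies a real selection
// functor, so SELECT_METHOD resolves to USE_SELECT_OP; DeviceSelect::Flagged
// supplies NullType for SelectOpT but a real flags iterator, resolving to
// USE_SELECT_FLAGS; DeviceSelect::Unique supplies NullType for both,
// resolving to USE_DISCONTINUITY.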
// Cache-modified Input iterator wrapper type (for applying cache modifier) for items
typedef typename If<IsPointer<InputIteratorT>::VALUE,
CacheModifiedInputIterator<AgentSelectIfPolicyT::LOAD_MODIFIER, InputT, OffsetT>, // Wrap the native input pointer with CacheModifiedValuesInputIterator
InputIteratorT>::Type // Directly use the supplied input iterator type
WrappedInputIteratorT;
// Cache-modified Input iterator wrapper type (for applying cache modifier) for values
typedef typename If<IsPointer<FlagsInputIteratorT>::VALUE,
CacheModifiedInputIterator<AgentSelectIfPolicyT::LOAD_MODIFIER, FlagT, OffsetT>, // Wrap the native input pointer with CacheModifiedValuesInputIterator
FlagsInputIteratorT>::Type // Directly use the supplied input iterator type
WrappedFlagsInputIteratorT;
// Parameterized BlockLoad type for input data
typedef BlockLoad<
OutputT,
BLOCK_THREADS,
ITEMS_PER_THREAD,
AgentSelectIfPolicyT::LOAD_ALGORITHM>
BlockLoadT;
// Parameterized BlockLoad type for flags
typedef BlockLoad<
FlagT,
BLOCK_THREADS,
ITEMS_PER_THREAD,
AgentSelectIfPolicyT::LOAD_ALGORITHM>
BlockLoadFlags;
// Parameterized BlockDiscontinuity type for items
typedef BlockDiscontinuity<
OutputT,
BLOCK_THREADS>
BlockDiscontinuityT;
// Parameterized BlockScan type
typedef BlockScan<
OffsetT,
BLOCK_THREADS,
AgentSelectIfPolicyT::SCAN_ALGORITHM>
BlockScanT;
// Callback type for obtaining tile prefix during block scan
typedef TilePrefixCallbackOp<
OffsetT,
cub::Sum,
ScanTileStateT>
TilePrefixCallbackOpT;
// Item exchange type
typedef OutputT ItemExchangeT[TILE_ITEMS];
// Shared memory type for this thread block
union _TempStorage
{
struct
{
typename BlockScanT::TempStorage scan; // Smem needed for tile scanning
typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback
typename BlockDiscontinuityT::TempStorage discontinuity; // Smem needed for discontinuity detection
};
// Smem needed for loading items
typename BlockLoadT::TempStorage load_items;
// Smem needed for loading values
typename BlockLoadFlags::TempStorage load_flags;
// Smem needed for compacting items (allows non POD items in this union)
Uninitialized<ItemExchangeT> raw_exchange;
};
// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
_TempStorage& temp_storage; ///< Reference to temp_storage
WrappedInputIteratorT d_in; ///< Input items
SelectedOutputIteratorT d_selected_out; ///< Unique output items
WrappedFlagsInputIteratorT d_flags_in; ///< Input selection flags (if applicable)
InequalityWrapper<EqualityOpT> inequality_op; ///< T inequality operator
SelectOpT select_op; ///< Selection operator
OffsetT num_items; ///< Total number of input items
//---------------------------------------------------------------------
// Constructor
//---------------------------------------------------------------------
// Constructor
__device__ __forceinline__
AgentSelectIf(
TempStorage &temp_storage, ///< Reference to temp_storage
InputIteratorT d_in, ///< Input data
FlagsInputIteratorT d_flags_in, ///< Input selection flags (if applicable)
SelectedOutputIteratorT d_selected_out, ///< Output data
SelectOpT select_op, ///< Selection operator
EqualityOpT equality_op, ///< Equality operator
OffsetT num_items) ///< Total number of input items
:
temp_storage(temp_storage.Alias()),
d_in(d_in),
d_flags_in(d_flags_in),
d_selected_out(d_selected_out),
select_op(select_op),
inequality_op(equality_op),
num_items(num_items)
{}
//---------------------------------------------------------------------
// Utility methods for initializing the selections
//---------------------------------------------------------------------
/**
* Initialize selections (specialized for selection operator)
*/
template <bool IS_FIRST_TILE, bool IS_LAST_TILE>
__device__ __forceinline__ void InitializeSelections(
OffsetT /*tile_offset*/,
OffsetT num_tile_items,
OutputT (&items)[ITEMS_PER_THREAD],
OffsetT (&selection_flags)[ITEMS_PER_THREAD],
Int2Type<USE_SELECT_OP> /*select_method*/)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
// Out-of-bounds items are selected (discounted later)
selection_flags[ITEM] = 1;
if (!IS_LAST_TILE || (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM < num_tile_items))
selection_flags[ITEM] = select_op(items[ITEM]);
}
}
/**
* Initialize selections (specialized for valid flags)
*/
template <bool IS_FIRST_TILE, bool IS_LAST_TILE>
__device__ __forceinline__ void InitializeSelections(
OffsetT tile_offset,
OffsetT num_tile_items,
OutputT (&/*items*/)[ITEMS_PER_THREAD],
OffsetT (&selection_flags)[ITEMS_PER_THREAD],
Int2Type<USE_SELECT_FLAGS> /*select_method*/)
{
CTA_SYNC();
FlagT flags[ITEMS_PER_THREAD];
if (IS_LAST_TILE)
{
// Out-of-bounds items are selected (discounted later)
BlockLoadFlags(temp_storage.load_flags).Load(d_flags_in + tile_offset, flags, num_tile_items, 1);
}
else
{
BlockLoadFlags(temp_storage.load_flags).Load(d_flags_in + tile_offset, flags);
}
// Convert flag type to selection_flags type
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
selection_flags[ITEM] = flags[ITEM];
}
}
/**
* Initialize selections (specialized for discontinuity detection)
*/
template <bool IS_FIRST_TILE, bool IS_LAST_TILE>
__device__ __forceinline__ void InitializeSelections(
OffsetT tile_offset,
OffsetT num_tile_items,
OutputT (&items)[ITEMS_PER_THREAD],
OffsetT (&selection_flags)[ITEMS_PER_THREAD],
Int2Type<USE_DISCONTINUITY> /*select_method*/)
{
if (IS_FIRST_TILE)
{
CTA_SYNC();
// Set head selection_flags. First tile sets the first flag for the first item
BlockDiscontinuityT(temp_storage.discontinuity).FlagHeads(selection_flags, items, inequality_op);
}
else
{
OutputT tile_predecessor;
if (threadIdx.x == 0)
tile_predecessor = d_in[tile_offset - 1];
CTA_SYNC();
BlockDiscontinuityT(temp_storage.discontinuity).FlagHeads(selection_flags, items, inequality_op, tile_predecessor);
}
// Set selection flags for out-of-bounds items
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
// Set selection_flags for out-of-bounds items
if ((IS_LAST_TILE) && (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items))
selection_flags[ITEM] = 1;
}
}
//---------------------------------------------------------------------
// Scatter utility methods
//---------------------------------------------------------------------
/**
* Scatter flagged items to output offsets (specialized for direct scattering)
*/
template <bool IS_LAST_TILE, bool IS_FIRST_TILE>
__device__ __forceinline__ void ScatterDirect(
OutputT (&items)[ITEMS_PER_THREAD],
OffsetT (&selection_flags)[ITEMS_PER_THREAD],
OffsetT (&selection_indices)[ITEMS_PER_THREAD],
OffsetT num_selections)
{
// Scatter flagged items
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
if (selection_flags[ITEM])
{
if ((!IS_LAST_TILE) || selection_indices[ITEM] < num_selections)
{
d_selected_out[selection_indices[ITEM]] = items[ITEM];
}
}
}
}
/**
* Scatter flagged items to output offsets (specialized for two-phase scattering)
*/
template <bool IS_LAST_TILE, bool IS_FIRST_TILE>
__device__ __forceinline__ void ScatterTwoPhase(
OutputT (&items)[ITEMS_PER_THREAD],
OffsetT (&selection_flags)[ITEMS_PER_THREAD],
OffsetT (&selection_indices)[ITEMS_PER_THREAD],
int /*num_tile_items*/, ///< Number of valid items in this tile
int num_tile_selections, ///< Number of selections in this tile
OffsetT num_selections_prefix, ///< Total number of selections prior to this tile
OffsetT /*num_rejected_prefix*/, ///< Total number of rejections prior to this tile
Int2Type<false> /*is_keep_rejects*/) ///< Marker type indicating whether to keep rejected items in the second partition
{
CTA_SYNC();
// Compact and scatter items
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
int local_scatter_offset = selection_indices[ITEM] - num_selections_prefix;
if (selection_flags[ITEM])
{
temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM];
}
}
CTA_SYNC();
for (int item = threadIdx.x; item < num_tile_selections; item += BLOCK_THREADS)
{
d_selected_out[num_selections_prefix + item] = temp_storage.raw_exchange.Alias()[item];
}
}
/**
* Scatter flagged items to output offsets (specialized for two-phase scattering)
*/
template <bool IS_LAST_TILE, bool IS_FIRST_TILE>
__device__ __forceinline__ void ScatterTwoPhase(
OutputT (&items)[ITEMS_PER_THREAD],
OffsetT (&selection_flags)[ITEMS_PER_THREAD],
OffsetT (&selection_indices)[ITEMS_PER_THREAD],
int num_tile_items, ///< Number of valid items in this tile
int num_tile_selections, ///< Number of selections in this tile
OffsetT num_selections_prefix, ///< Total number of selections prior to this tile
OffsetT num_rejected_prefix, ///< Total number of rejections prior to this tile
Int2Type<true> /*is_keep_rejects*/) ///< Marker type indicating whether to keep rejected items in the second partition
{
CTA_SYNC();
int tile_num_rejections = num_tile_items - num_tile_selections;
// Scatter items to shared memory (rejections first)
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
int item_idx = (threadIdx.x * ITEMS_PER_THREAD) + ITEM;
int local_selection_idx = selection_indices[ITEM] - num_selections_prefix;
int local_rejection_idx = item_idx - local_selection_idx;
int local_scatter_offset = (selection_flags[ITEM]) ?
tile_num_rejections + local_selection_idx :
local_rejection_idx;
temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM];
}
CTA_SYNC();
// Gather items from shared memory and scatter to global
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
int item_idx = (ITEM * BLOCK_THREADS) + threadIdx.x;
int rejection_idx = item_idx;
int selection_idx = item_idx - tile_num_rejections;
OffsetT scatter_offset = (item_idx < tile_num_rejections) ?
num_items - num_rejected_prefix - rejection_idx - 1 :
num_selections_prefix + selection_idx;
OutputT item = temp_storage.raw_exchange.Alias()[item_idx];
if (!IS_LAST_TILE || (item_idx < num_tile_items))
{
d_selected_out[scatter_offset] = item;
}
}
}
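// Worked example (illustrative): with num_items = 100, num_tile_items = 32,
// num_tile_selections = 12 (hence 20 rejections in this tile),
// num_selections_prefix = 30 and num_rejected_prefix = 38: selected items are
// scattered to offsets [30, 42) from the front of the output, while the 20
// rejections are scattered back-to-front to offsets 61 down to 42, keeping
// the two partitions disjoint.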
/**
* Scatter flagged items
*/
template <bool IS_LAST_TILE, bool IS_FIRST_TILE>
__device__ __forceinline__ void Scatter(
OutputT (&items)[ITEMS_PER_THREAD],
OffsetT (&selection_flags)[ITEMS_PER_THREAD],
OffsetT (&selection_indices)[ITEMS_PER_THREAD],
int num_tile_items, ///< Number of valid items in this tile
int num_tile_selections, ///< Number of selections in this tile
OffsetT num_selections_prefix, ///< Total number of selections prior to this tile
OffsetT num_rejected_prefix, ///< Total number of rejections prior to this tile
OffsetT num_selections) ///< Total number of selections including this tile
{
// Do a two-phase scatter if (a) keeping both partitions, or (b) two-phase scatter is enabled and the average number of selected items per thread is greater than one
if (KEEP_REJECTS || (TWO_PHASE_SCATTER && (num_tile_selections > BLOCK_THREADS)))
{
ScatterTwoPhase<IS_LAST_TILE, IS_FIRST_TILE>(
items,
selection_flags,
selection_indices,
num_tile_items,
num_tile_selections,
num_selections_prefix,
num_rejected_prefix,
Int2Type<KEEP_REJECTS>());
}
else
{
ScatterDirect<IS_LAST_TILE, IS_FIRST_TILE>(
items,
selection_flags,
selection_indices,
num_selections);
}
}
//---------------------------------------------------------------------
// Cooperatively scan a device-wide sequence of tiles with other CTAs
//---------------------------------------------------------------------
/**
* Process first tile of input (dynamic chained scan). Returns the running count of selections (including this tile)
*/
template <bool IS_LAST_TILE>
__device__ __forceinline__ OffsetT ConsumeFirstTile(
int num_tile_items, ///< Number of input items comprising this tile
OffsetT tile_offset, ///< Tile offset
ScanTileStateT& tile_state) ///< Global tile state descriptor
{
OutputT items[ITEMS_PER_THREAD];
OffsetT selection_flags[ITEMS_PER_THREAD];
OffsetT selection_indices[ITEMS_PER_THREAD];
// Load items
if (IS_LAST_TILE)
BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items, num_tile_items);
else
BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items);
// Initialize selection_flags
InitializeSelections<true, IS_LAST_TILE>(
tile_offset,
num_tile_items,
items,
selection_flags,
Int2Type<SELECT_METHOD>());
CTA_SYNC();
// Exclusive scan of selection_flags
OffsetT num_tile_selections;
BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_tile_selections);
if (threadIdx.x == 0)
{
// Update tile status if this is not the last tile
if (!IS_LAST_TILE)
tile_state.SetInclusive(0, num_tile_selections);
}
// Discount any out-of-bounds selections
if (IS_LAST_TILE)
num_tile_selections -= (TILE_ITEMS - num_tile_items);
// Scatter flagged items
Scatter<IS_LAST_TILE, true>(
items,
selection_flags,
selection_indices,
num_tile_items,
num_tile_selections,
0,
0,
num_tile_selections);
return num_tile_selections;
}
/**
* Process subsequent tile of input (dynamic chained scan). Returns the running count of selections (including this tile)
*/
template <bool IS_LAST_TILE>
__device__ __forceinline__ OffsetT ConsumeSubsequentTile(
int num_tile_items, ///< Number of input items comprising this tile
int tile_idx, ///< Tile index
OffsetT tile_offset, ///< Tile offset
ScanTileStateT& tile_state) ///< Global tile state descriptor
{
OutputT items[ITEMS_PER_THREAD];
OffsetT selection_flags[ITEMS_PER_THREAD];
OffsetT selection_indices[ITEMS_PER_THREAD];
// Load items
if (IS_LAST_TILE)
BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items, num_tile_items);
else
BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items);
// Initialize selection_flags
InitializeSelections<false, IS_LAST_TILE>(
tile_offset,
num_tile_items,
items,
selection_flags,
Int2Type<SELECT_METHOD>());
CTA_SYNC();
// Exclusive scan of values and selection_flags
TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, cub::Sum(), tile_idx);
BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, prefix_op);
OffsetT num_tile_selections = prefix_op.GetBlockAggregate();
OffsetT num_selections = prefix_op.GetInclusivePrefix();
OffsetT num_selections_prefix = prefix_op.GetExclusivePrefix();
OffsetT num_rejected_prefix = (tile_idx * TILE_ITEMS) - num_selections_prefix;
// Discount any out-of-bounds selections
if (IS_LAST_TILE)
{
int num_discount = TILE_ITEMS - num_tile_items;
num_selections -= num_discount;
num_tile_selections -= num_discount;
}
// Scatter flagged items
Scatter<IS_LAST_TILE, false>(
items,
selection_flags,
selection_indices,
num_tile_items,
num_tile_selections,
num_selections_prefix,
num_rejected_prefix,
num_selections);
return num_selections;
}
/**
* Process a tile of input
*/
template <bool IS_LAST_TILE>
__device__ __forceinline__ OffsetT ConsumeTile(
int num_tile_items, ///< Number of input items comprising this tile
int tile_idx, ///< Tile index
OffsetT tile_offset, ///< Tile offset
ScanTileStateT& tile_state) ///< Global tile state descriptor
{
OffsetT num_selections;
if (tile_idx == 0)
{
num_selections = ConsumeFirstTile<IS_LAST_TILE>(num_tile_items, tile_offset, tile_state);
}
else
{
num_selections = ConsumeSubsequentTile<IS_LAST_TILE>(num_tile_items, tile_idx, tile_offset, tile_state);
}
return num_selections;
}
/**
* Scan tiles of items as part of a dynamic chained scan
*/
template <typename NumSelectedIteratorT> ///< Output iterator type for recording the number of items selected
__device__ __forceinline__ void ConsumeRange(
int num_tiles, ///< Total number of input tiles
ScanTileStateT& tile_state, ///< Global tile state descriptor
NumSelectedIteratorT d_num_selected_out) ///< Output iterator recording the total number of items selected
{
// Blocks are launched in increasing order, so just assign one tile per block
int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index
OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile
if (tile_idx < num_tiles - 1)
{
// Not the last tile (full)
ConsumeTile<false>(TILE_ITEMS, tile_idx, tile_offset, tile_state);
}
else
{
// The last tile (possibly partially-full)
OffsetT num_remaining = num_items - tile_offset;
OffsetT num_selections = ConsumeTile<true>(num_remaining, tile_idx, tile_offset, tile_state);
if (threadIdx.x == 0)
{
// Output the total number of items selected
*d_num_selected_out = num_selections;
}
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/agent/agent_segment_fixup.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::AgentSegmentFixup implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key.
*/
#pragma once
#include <iterator>
#include "single_pass_scan_operators.cuh"
#include "../block/block_load.cuh"
#include "../block/block_store.cuh"
#include "../block/block_scan.cuh"
#include "../block/block_discontinuity.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../iterator/constant_input_iterator.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Parameterizable tuning policy type for AgentSegmentFixup
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use
CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements
BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use
struct AgentSegmentFixupPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
};
static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use
static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements
static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use
};
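// Example instantiation (an illustrative assumption, not a tuned preset):
// 128 threads per block, 4 key-value pairs per thread.
typedef AgentSegmentFixupPolicy<128, 4, BLOCK_LOAD_DIRECT, LOAD_LDG, BLOCK_SCAN_WARP_SCANS>
ExampleAgentSegmentFixupPolicy;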
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief AgentSegmentFixup implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key
*/
template <
typename AgentSegmentFixupPolicyT, ///< Parameterized AgentSegmentFixupPolicy tuning policy type
typename PairsInputIteratorT, ///< Random-access input iterator type for keys
typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values
typename EqualityOpT, ///< KeyT equality operator type
typename ReductionOpT, ///< ValueT reduction operator type
typename OffsetT, ///< Signed integer type for global offsets
typename SemiringT> ///< Semiring type
struct AgentSegmentFixup
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
// Data type of key-value input iterator
typedef typename std::iterator_traits<PairsInputIteratorT>::value_type KeyValuePairT;
// Value type
typedef typename KeyValuePairT::Value ValueT;
// Tile status descriptor interface type
typedef ReduceByKeyScanTileState<ValueT, OffsetT> ScanTileStateT;
// Constants
enum
{
BLOCK_THREADS = AgentSegmentFixupPolicyT::BLOCK_THREADS,
ITEMS_PER_THREAD = AgentSegmentFixupPolicyT::ITEMS_PER_THREAD,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
// Whether or not to perform fixup using RLE + global atomics
// (atomicAdd on double is supported starting with compute capability 6.0)
USE_ATOMIC_FIXUP = (((CUB_PTX_ARCH >= 350) &&
(Equals<ValueT, float>::VALUE ||
Equals<ValueT, int>::VALUE ||
Equals<ValueT, unsigned int>::VALUE ||
Equals<ValueT, unsigned long long>::VALUE))
||
((CUB_PTX_ARCH >= 600) &&
(Equals<ValueT, double>::VALUE)))
&& SemiringT::HAS_PLUS_ATOMICS, // don't use atomics for semirings like maxmin
// Whether or not the scan operation has a zero-valued identity value (true if we're performing addition on a primitive type)
// not used.
//HAS_IDENTITY_ZERO = (Equals<ReductionOpT, cub::Sum>::VALUE) && (Traits<ValueT>::PRIMITIVE),
};
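// For example (illustrative): ValueT = float on sm_35+ hardware satisfies the
// test above, so partial sums are folded in with atomicAdd; as the comment
// above notes, a max-min semiring advertises HAS_PLUS_ATOMICS = false and
// therefore always takes the reduce-by-key fixup path instead.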
// Cache-modified Input iterator wrapper type (for applying cache modifier) for keys
typedef typename If<IsPointer<PairsInputIteratorT>::VALUE,
CacheModifiedInputIterator<AgentSegmentFixupPolicyT::LOAD_MODIFIER, KeyValuePairT, OffsetT>, // Wrap the native input pointer with CacheModifiedValuesInputIterator
PairsInputIteratorT>::Type // Directly use the supplied input iterator type
WrappedPairsInputIteratorT;
// Cache-modified Input iterator wrapper type (for applying cache modifier) for fixup values
typedef typename If<IsPointer<AggregatesOutputIteratorT>::VALUE,
CacheModifiedInputIterator<AgentSegmentFixupPolicyT::LOAD_MODIFIER, ValueT, OffsetT>, // Wrap the native input pointer with CacheModifiedValuesInputIterator
AggregatesOutputIteratorT>::Type // Directly use the supplied input iterator type
WrappedFixupInputIteratorT;
// Reduce-value-by-segment scan operator
typedef ReduceByKeyOp<typename SemiringT::SumOp> ReduceBySegmentOpT;
// Parameterized BlockLoad type for pairs
typedef BlockLoad<
KeyValuePairT,
BLOCK_THREADS,
ITEMS_PER_THREAD,
AgentSegmentFixupPolicyT::LOAD_ALGORITHM>
BlockLoadPairs;
// Parameterized BlockScan type
typedef BlockScan<
KeyValuePairT,
BLOCK_THREADS,
AgentSegmentFixupPolicyT::SCAN_ALGORITHM>
BlockScanT;
// Callback type for obtaining tile prefix during block scan
typedef TilePrefixCallbackOp<
KeyValuePairT,
ReduceBySegmentOpT,
ScanTileStateT>
TilePrefixCallbackOpT;
// Shared memory type for this thread block
union _TempStorage
{
struct
{
typename BlockScanT::TempStorage scan; // Smem needed for tile scanning
typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback
};
// Smem needed for loading keys
typename BlockLoadPairs::TempStorage load_pairs;
};
// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
_TempStorage& temp_storage; ///< Reference to temp_storage
WrappedPairsInputIteratorT d_pairs_in; ///< Input keys
AggregatesOutputIteratorT d_aggregates_out; ///< Output value aggregates
WrappedFixupInputIteratorT d_fixup_in; ///< Fixup input values
InequalityWrapper<EqualityOpT> inequality_op; ///< KeyT inequality operator
ReductionOpT reduction_op; ///< Reduction operator
ReduceBySegmentOpT scan_op; ///< Reduce-by-segment scan operator
//---------------------------------------------------------------------
// Constructor
//---------------------------------------------------------------------
// Constructor
__device__ __forceinline__
AgentSegmentFixup(
TempStorage& temp_storage, ///< Reference to temp_storage
PairsInputIteratorT d_pairs_in, ///< Input keys
AggregatesOutputIteratorT d_aggregates_out, ///< Output value aggregates
EqualityOpT equality_op, ///< KeyT equality operator
ReductionOpT reduction_op) ///< ValueT reduction operator
:
temp_storage(temp_storage.Alias()),
d_pairs_in(d_pairs_in),
d_aggregates_out(d_aggregates_out),
d_fixup_in(d_aggregates_out),
inequality_op(equality_op),
reduction_op(reduction_op),
scan_op(reduction_op)
{}
//---------------------------------------------------------------------
// Cooperatively scan a device-wide sequence of tiles with other CTAs
//---------------------------------------------------------------------
/**
* Process input tile. Specialized for atomic-fixup
*/
template <bool IS_LAST_TILE>
__device__ __forceinline__ void ConsumeTile(
OffsetT max_item, ///< maximum item key, to prevent OOB writes
OffsetT num_remaining, ///< Number of global input items remaining (including this tile)
int tile_idx, ///< Tile index
OffsetT tile_offset, ///< Tile offset
ScanTileStateT& tile_state, ///< Global tile state descriptor
Int2Type<true> use_atomic_fixup) ///< Marker whether to use atomicAdd (instead of reduce-by-key)
{
KeyValuePairT pairs[ITEMS_PER_THREAD];
// Load pairs
KeyValuePairT oob_pair;
oob_pair.key = -1;
if (IS_LAST_TILE)
BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs, num_remaining, oob_pair);
else
BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs);
// RLE
#pragma unroll
for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
ValueT* d_scatter = d_aggregates_out + pairs[ITEM - 1].key;
if (pairs[ITEM].key != pairs[ITEM - 1].key && pairs[ITEM - 1].key < max_item)
atomicAdd(d_scatter, pairs[ITEM - 1].value);
else
pairs[ITEM].value = reduction_op(pairs[ITEM - 1].value, pairs[ITEM].value);
}
// Flush last item if valid
ValueT* d_scatter = d_aggregates_out + pairs[ITEMS_PER_THREAD - 1].key;
if ((!IS_LAST_TILE) || (pairs[ITEMS_PER_THREAD - 1].key >= 0))
atomicAdd(d_scatter, pairs[ITEMS_PER_THREAD - 1].value);
}
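    // Illustrative trace of the atomic-fixup path above: if a thread holds
    // pairs {(key=2, a), (key=2, b), (key=5, c)}, the RLE loop folds a into b
    // via reduction_op(a, b), the key change at ITEM = 2 triggers
    // atomicAdd(d_aggregates_out + 2, reduction_op(a, b)), and the final
    // flush issues atomicAdd(d_aggregates_out + 5, c).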
/**
* Process input tile. Specialized for reduce-by-key fixup
*/
template <bool IS_LAST_TILE>
__device__ __forceinline__ void ConsumeTile(
OffsetT max_item, ///< maximum item key, to prevent OOB writes
OffsetT num_remaining, ///< Number of global input items remaining (including this tile)
int tile_idx, ///< Tile index
OffsetT tile_offset, ///< Tile offset
ScanTileStateT& tile_state, ///< Global tile state descriptor
Int2Type<false> use_atomic_fixup) ///< Marker whether to use atomicAdd (instead of reduce-by-key)
{
KeyValuePairT pairs[ITEMS_PER_THREAD];
KeyValuePairT scatter_pairs[ITEMS_PER_THREAD];
// Load pairs
KeyValuePairT oob_pair;
oob_pair.key = -1;
if (IS_LAST_TILE)
BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs, num_remaining, oob_pair);
else
BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs);
CTA_SYNC();
KeyValuePairT tile_aggregate;
if (tile_idx == 0)
{
// Exclusive scan of values and segment_flags
BlockScanT(temp_storage.scan).ExclusiveScan(pairs, scatter_pairs, scan_op, tile_aggregate);
// Update tile status if this is not the last tile
if (threadIdx.x == 0)
{
// Set first segment id to not trigger a flush (invalid from exclusive scan)
scatter_pairs[0].key = pairs[0].key;
if (!IS_LAST_TILE)
tile_state.SetInclusive(0, tile_aggregate);
}
}
else
{
// Exclusive scan of values and segment_flags
TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, scan_op, tile_idx);
BlockScanT(temp_storage.scan).ExclusiveScan(pairs, scatter_pairs, scan_op, prefix_op);
tile_aggregate = prefix_op.GetBlockAggregate();
}
// Scatter updated values
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
if (scatter_pairs[ITEM].key != pairs[ITEM].key && scatter_pairs[ITEM].key < max_item)
{
// Update the value at the key location
ValueT value = d_fixup_in[scatter_pairs[ITEM].key];
value = reduction_op(value, scatter_pairs[ITEM].value);
d_aggregates_out[scatter_pairs[ITEM].key] = value;
}
}
// Finalize the last item
if (IS_LAST_TILE)
{
// Last thread will output final count and last item, if necessary
if (threadIdx.x == BLOCK_THREADS - 1)
{
// If the last tile is a whole tile, the inclusive prefix contains accumulated value reduction for the last segment
if (num_remaining == TILE_ITEMS)
{
// Update the value at the key location
OffsetT last_key = pairs[ITEMS_PER_THREAD - 1].key;
d_aggregates_out[last_key] = reduction_op(tile_aggregate.value, d_fixup_in[last_key]);
}
}
}
}
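    // Note on the scatter step above: after the exclusive scan,
    // scatter_pairs[ITEM] carries the running aggregate of the segment
    // preceding pairs[ITEM], so a key mismatch between the two marks a
    // completed segment whose partial aggregate is folded into
    // d_aggregates_out at that key.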
/**
* Scan tiles of items as part of a dynamic chained scan
*/
__device__ __forceinline__ void ConsumeRange(
OffsetT max_item,
int num_items, ///< Total number of input items
int num_tiles, ///< Total number of input tiles
ScanTileStateT& tile_state) ///< Global tile state descriptor
{
// Blocks are launched in increasing order, so just assign one tile per block
int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index
OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile
OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile)
if (num_remaining > TILE_ITEMS)
{
// Not the last tile (full)
ConsumeTile<false>(max_item, num_remaining, tile_idx, tile_offset, tile_state, Int2Type<USE_ATOMIC_FIXUP>());
}
else if (num_remaining > 0)
{
// The last tile (possibly partially-full)
ConsumeTile<true>(max_item, num_remaining, tile_idx, tile_offset, tile_state, Int2Type<USE_ATOMIC_FIXUP>());
}
}
};
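
// A minimal sketch of a kernel that could drive this agent (the kernel name
// and template plumbing are illustrative assumptions, not part of this header):
//
//   template <typename PolicyT, typename PairsItT, typename AggItT,
//             typename EqOpT, typename RedOpT, typename OffsetT, typename SemiringT>
//   __global__ void SegmentFixupSketchKernel(
//       PairsItT d_pairs_in, AggItT d_aggregates_out, OffsetT max_item,
//       int num_items, int num_tiles,
//       ReduceByKeyScanTileState<
//           typename std::iterator_traits<PairsItT>::value_type::Value, OffsetT> tile_state)
//   {
//       typedef AgentSegmentFixup<PolicyT, PairsItT, AggItT,
//                                 EqOpT, RedOpT, OffsetT, SemiringT> AgentT;
//       __shared__ typename AgentT::TempStorage temp_storage;
//       AgentT(temp_storage, d_pairs_in, d_aggregates_out, EqOpT(), RedOpT())
//           .ConsumeRange(max_item, num_items, num_tiles, tile_state);
//   }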
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/host/mutex.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Simple portable mutex
*/
#pragma once
#if (__cplusplus > 199711L) || (defined(_MSC_VER) && _MSC_VER >= 1800)
#include <mutex>
#else
#if defined(_WIN32) || defined(_WIN64)
#include <intrin.h>
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#undef NOMINMAX
/**
* Compiler read/write barrier
*/
#pragma intrinsic(_ReadWriteBarrier)
#endif
#endif
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* Simple portable mutex
* - Wraps std::mutex when compiled with C++11 or newer (supported on all platforms)
 * - Uses GNU/Windows spinlock mechanisms for pre-C++11 compilers (supported on x86/x64 when compiled with cl.exe or g++)
*/
struct Mutex
{
#if (__cplusplus > 199711L) || (defined(_MSC_VER) && _MSC_VER >= 1800)
std::mutex mtx;
void Lock()
{
mtx.lock();
}
void Unlock()
{
mtx.unlock();
}
    bool TryLock()
    {
        return mtx.try_lock();
    }
#else //__cplusplus > 199711L
#if defined(_MSC_VER)
// Microsoft VC++
typedef long Spinlock;
#else
// GNU g++
typedef int Spinlock;
/**
* Compiler read/write barrier
*/
__forceinline__ void _ReadWriteBarrier()
{
__sync_synchronize();
}
/**
* Atomic exchange
*/
__forceinline__ long _InterlockedExchange(volatile int * const Target, const int Value)
{
// NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier
_ReadWriteBarrier();
return __sync_lock_test_and_set(Target, Value);
}
/**
* Pause instruction to prevent excess processor bus usage
*/
__forceinline__ void YieldProcessor()
{
}
#endif // defined(_MSC_VER)
/// Lock member
volatile Spinlock lock;
/**
* Constructor
*/
Mutex() : lock(0) {}
/**
* Return when the specified spinlock has been acquired
*/
__forceinline__ void Lock()
{
while (1)
{
if (!_InterlockedExchange(&lock, 1)) return;
while (lock) YieldProcessor();
}
}
/**
* Release the specified spinlock
*/
__forceinline__ void Unlock()
{
_ReadWriteBarrier();
lock = 0;
}
#endif // __cplusplus > 199711L
};
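
// A minimal host-side usage sketch (the guarded work is illustrative):
//
//   cub::Mutex mutex;            // e.g., a static, process-wide lock
//   mutex.Lock();
//   // ... critical section: mutate shared host state ...
//   mutex.Unlock();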
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_histogram.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* The cub::BlockHistogram class provides [<em>collective</em>](index.html#sec0) methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block.
*/
#pragma once
#include "specializations/block_histogram_sort.cuh"
#include "specializations/block_histogram_atomic.cuh"
#include "../util_ptx.cuh"
#include "../util_arch.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Algorithmic variants
******************************************************************************/
/**
* \brief BlockHistogramAlgorithm enumerates alternative algorithms for the parallel construction of block-wide histograms.
*/
enum BlockHistogramAlgorithm
{
/**
* \par Overview
* Sorting followed by differentiation. Execution is comprised of two phases:
* -# Sort the data using efficient radix sort
* -# Look for "runs" of same-valued keys by detecting discontinuities; the run-lengths are histogram bin counts.
*
* \par Performance Considerations
* Delivers consistent throughput regardless of sample bin distribution.
*/
BLOCK_HISTO_SORT,
/**
* \par Overview
* Use atomic addition to update byte counts directly
*
* \par Performance Considerations
* Performance is strongly tied to the hardware implementation of atomic
     * addition, and may be significantly degraded for non-uniformly-random
* input distributions where many concurrent updates are likely to be
* made to the same bin counter.
*/
BLOCK_HISTO_ATOMIC,
};
/******************************************************************************
* Block histogram
******************************************************************************/
/**
* \brief The BlockHistogram class provides [<em>collective</em>](index.html#sec0) methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. ![](histogram_logo.png)
* \ingroup BlockModule
*
* \tparam T The sample type being histogrammed (must be castable to an integer bin identifier)
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam ITEMS_PER_THREAD The number of items per thread
 * \tparam BINS                 The number of bins within the histogram
* \tparam ALGORITHM <b>[optional]</b> cub::BlockHistogramAlgorithm enumerator specifying the underlying algorithm to use (default: cub::BLOCK_HISTO_SORT)
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
* - A <a href="http://en.wikipedia.org/wiki/Histogram"><em>histogram</em></a>
* counts the number of observations that fall into each of the disjoint categories (known as <em>bins</em>).
* - BlockHistogram can be optionally specialized to use different algorithms:
* -# <b>cub::BLOCK_HISTO_SORT</b>. Sorting followed by differentiation. [More...](\ref cub::BlockHistogramAlgorithm)
* -# <b>cub::BLOCK_HISTO_ATOMIC</b>. Use atomic addition to update byte counts directly. [More...](\ref cub::BlockHistogramAlgorithm)
*
* \par Performance Considerations
* - \granularity
*
* \par A Simple Example
* \blockcollective{BlockHistogram}
* \par
* The code snippet below illustrates a 256-bin histogram of 512 integer samples that
* are partitioned across 128 threads where each thread owns 4 samples.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_histogram.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
* typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
*
* // Allocate shared memory for BlockHistogram
* __shared__ typename BlockHistogram::TempStorage temp_storage;
*
* // Allocate shared memory for block-wide histogram bin counts
* __shared__ unsigned int smem_histogram[256];
*
* // Obtain input samples per thread
* unsigned char data[4];
* ...
*
* // Compute the block-wide histogram
* BlockHistogram(temp_storage).Histogram(data, smem_histogram);
*
* \endcode
*
* \par Performance and Usage Considerations
* - The histogram output can be constructed in shared or device-accessible memory
* - See cub::BlockHistogramAlgorithm for performance details regarding algorithmic alternatives
*
*/
template <
typename T,
int BLOCK_DIM_X,
int ITEMS_PER_THREAD,
int BINS,
BlockHistogramAlgorithm ALGORITHM = BLOCK_HISTO_SORT,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockHistogram
{
private:
/******************************************************************************
* Constants and type definitions
******************************************************************************/
/// Constants
enum
{
/// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
};
/**
* Ensure the template parameterization meets the requirements of the
* targeted device architecture. BLOCK_HISTO_ATOMIC can only be used
* on version SM120 or later. Otherwise BLOCK_HISTO_SORT is used
* regardless.
*/
static const BlockHistogramAlgorithm SAFE_ALGORITHM =
((ALGORITHM == BLOCK_HISTO_ATOMIC) && (PTX_ARCH < 120)) ?
BLOCK_HISTO_SORT :
ALGORITHM;
/// Internal specialization.
typedef typename If<(SAFE_ALGORITHM == BLOCK_HISTO_SORT),
BlockHistogramSort<T, BLOCK_DIM_X, ITEMS_PER_THREAD, BINS, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH>,
BlockHistogramAtomic<BINS> >::Type InternalBlockHistogram;
/// Shared memory storage layout type for BlockHistogram
typedef typename InternalBlockHistogram::TempStorage _TempStorage;
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
/******************************************************************************
* Utility methods
******************************************************************************/
/// Internal storage allocator
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
public:
/// \smemstorage{BlockHistogram}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockHistogram()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockHistogram(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Histogram operations
*********************************************************************/
//@{
/**
* \brief Initialize the shared histogram counters to zero.
*
* \par Snippet
 * The code snippet below illustrates the initialization and update of a
* histogram of 512 integer samples that are partitioned across 128 threads
* where each thread owns 4 samples.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_histogram.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
* typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
*
* // Allocate shared memory for BlockHistogram
* __shared__ typename BlockHistogram::TempStorage temp_storage;
*
* // Allocate shared memory for block-wide histogram bin counts
* __shared__ unsigned int smem_histogram[256];
*
* // Obtain input samples per thread
* unsigned char thread_samples[4];
* ...
*
* // Initialize the block-wide histogram
* BlockHistogram(temp_storage).InitHistogram(smem_histogram);
*
* // Update the block-wide histogram
* BlockHistogram(temp_storage).Composite(thread_samples, smem_histogram);
*
* \endcode
*
* \tparam CounterT <b>[inferred]</b> Histogram counter type
*/
template <typename CounterT >
__device__ __forceinline__ void InitHistogram(CounterT histogram[BINS])
{
// Initialize histogram bin counts to zeros
int histo_offset = 0;
#pragma unroll
for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS)
{
histogram[histo_offset + linear_tid] = 0;
}
// Finish up with guarded initialization if necessary
if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS))
{
histogram[histo_offset + linear_tid] = 0;
}
}
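    // For example, with BINS = 256 and BLOCK_THREADS = 96, the unrolled loop
    // zeroes bins [0, 192) in two full passes and the guarded step zeroes the
    // remaining bins [192, 256) using threads 0..63 only.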
/**
* \brief Constructs a block-wide histogram in shared/device-accessible memory. Each thread contributes an array of input elements.
*
* \par
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a 256-bin histogram of 512 integer samples that
* are partitioned across 128 threads where each thread owns 4 samples.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_histogram.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
* typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
*
* // Allocate shared memory for BlockHistogram
* __shared__ typename BlockHistogram::TempStorage temp_storage;
*
* // Allocate shared memory for block-wide histogram bin counts
* __shared__ unsigned int smem_histogram[256];
*
* // Obtain input samples per thread
* unsigned char thread_samples[4];
* ...
*
* // Compute the block-wide histogram
* BlockHistogram(temp_storage).Histogram(thread_samples, smem_histogram);
*
* \endcode
*
* \tparam CounterT <b>[inferred]</b> Histogram counter type
*/
template <
typename CounterT >
__device__ __forceinline__ void Histogram(
T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram
CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram
{
// Initialize histogram bin counts to zeros
InitHistogram(histogram);
CTA_SYNC();
// Composite the histogram
InternalBlockHistogram(temp_storage).Composite(items, histogram);
}
/**
* \brief Updates an existing block-wide histogram in shared/device-accessible memory. Each thread composites an array of input elements.
*
* \par
* - \granularity
* - \smemreuse
*
* \par Snippet
 * The code snippet below illustrates the initialization and update of a
* histogram of 512 integer samples that are partitioned across 128 threads
* where each thread owns 4 samples.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_histogram.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
* typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
*
* // Allocate shared memory for BlockHistogram
* __shared__ typename BlockHistogram::TempStorage temp_storage;
*
* // Allocate shared memory for block-wide histogram bin counts
* __shared__ unsigned int smem_histogram[256];
*
* // Obtain input samples per thread
* unsigned char thread_samples[4];
* ...
*
* // Initialize the block-wide histogram
* BlockHistogram(temp_storage).InitHistogram(smem_histogram);
*
* // Update the block-wide histogram
* BlockHistogram(temp_storage).Composite(thread_samples, smem_histogram);
*
* \endcode
*
* \tparam CounterT <b>[inferred]</b> Histogram counter type
*/
template <
typename CounterT >
__device__ __forceinline__ void Composite(
T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram
CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram
{
InternalBlockHistogram(temp_storage).Composite(items, histogram);
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_store.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* Operations for writing linear segments of data from the CUDA thread block
*/
#pragma once
#include <iterator>
#include "block_exchange.cuh"
#include "../util_ptx.cuh"
#include "../util_macro.cuh"
#include "../util_type.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilIo
* @{
*/
/******************************************************************//**
* \name Blocked arrangement I/O (direct)
*********************************************************************/
//@{
/**
* \brief Store a blocked arrangement of items across a thread block into a linear segment of items.
*
* \blocked
*
* \tparam T <b>[inferred]</b> The data type to store.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator.
*/
template <
typename T,
int ITEMS_PER_THREAD,
typename OutputIteratorT>
__device__ __forceinline__ void StoreDirectBlocked(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + threadIdx.x</tt> for 2D thread blocks)
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
OutputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD);
// Store directly in thread-blocked order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
thread_itr[ITEM] = items[ITEM];
}
}
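// For example, with ITEMS_PER_THREAD = 4, thread 0 writes block_itr[0..3],
// thread 1 writes block_itr[4..7], and so on (the "blocked" arrangement).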
/**
* \brief Store a blocked arrangement of items across a thread block into a linear segment of items, guarded by range
*
* \blocked
*
* \tparam T <b>[inferred]</b> The data type to store.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator.
*/
template <
typename T,
int ITEMS_PER_THREAD,
typename OutputIteratorT>
__device__ __forceinline__ void StoreDirectBlocked(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + threadIdx.x</tt> for 2D thread blocks)
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
OutputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD);
// Store directly in thread-blocked order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if (ITEM + (linear_tid * ITEMS_PER_THREAD) < valid_items)
{
thread_itr[ITEM] = items[ITEM];
}
}
}
/**
* \brief Store a blocked arrangement of items across a thread block into a linear segment of items.
*
* \blocked
*
 * The output pointer \p block_ptr must be quad-item aligned,
 * which is the default starting alignment returned by \p cudaMalloc()
*
* \par
* The following conditions will prevent vectorization and storing will fall back to cub::BLOCK_STORE_DIRECT:
* - \p ITEMS_PER_THREAD is odd
* - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.)
*
* \tparam T <b>[inferred]</b> The data type to store.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
*
*/
template <
typename T,
int ITEMS_PER_THREAD>
__device__ __forceinline__ void StoreDirectBlockedVectorized(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + threadIdx.x</tt> for 2D thread blocks)
T *block_ptr, ///< [in] Input pointer for storing from
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
enum
{
// Maximum CUDA vector size is 4 elements
MAX_VEC_SIZE = CUB_MIN(4, ITEMS_PER_THREAD),
// Vector size must be a power of two and an even divisor of the items per thread
VEC_SIZE = ((((MAX_VEC_SIZE - 1) & MAX_VEC_SIZE) == 0) && ((ITEMS_PER_THREAD % MAX_VEC_SIZE) == 0)) ?
MAX_VEC_SIZE :
1,
VECTORS_PER_THREAD = ITEMS_PER_THREAD / VEC_SIZE,
};
// Vector type
typedef typename CubVector<T, VEC_SIZE>::Type Vector;
// Alias global pointer
Vector *block_ptr_vectors = reinterpret_cast<Vector*>(const_cast<T*>(block_ptr));
// Alias pointers (use "raw" array here which should get optimized away to prevent conservative PTXAS lmem spilling)
Vector raw_vector[VECTORS_PER_THREAD];
T *raw_items = reinterpret_cast<T*>(raw_vector);
// Copy
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
raw_items[ITEM] = items[ITEM];
}
// Direct-store using vector types
StoreDirectBlocked(linear_tid, block_ptr_vectors, raw_vector);
}
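// For example, with T = int and ITEMS_PER_THREAD = 4, VEC_SIZE resolves to 4
// and each thread issues a single 16-byte int4 store; with ITEMS_PER_THREAD = 6,
// MAX_VEC_SIZE = 4 does not evenly divide the items per thread, so VEC_SIZE
// falls back to 1 and the writes degenerate to scalar stores.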
//@} end member group
/******************************************************************//**
* \name Striped arrangement I/O (direct)
*********************************************************************/
//@{
/**
* \brief Store a striped arrangement of data across the thread block into a linear segment of items.
*
* \striped
*
* \tparam BLOCK_THREADS The thread block size in threads
* \tparam T <b>[inferred]</b> The data type to store.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator.
*/
template <
int BLOCK_THREADS,
typename T,
int ITEMS_PER_THREAD,
typename OutputIteratorT>
__device__ __forceinline__ void StoreDirectStriped(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + threadIdx.x</tt> for 2D thread blocks)
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
OutputIteratorT thread_itr = block_itr + linear_tid;
// Store directly in striped order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
thread_itr[(ITEM * BLOCK_THREADS)] = items[ITEM];
}
}
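// For example, with BLOCK_THREADS = 128 and ITEMS_PER_THREAD = 4, thread t
// writes block_itr[t], block_itr[t + 128], block_itr[t + 256], and
// block_itr[t + 384] (the "striped" arrangement), so consecutive threads touch
// adjacent addresses on every pass.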
/**
* \brief Store a striped arrangement of data across the thread block into a linear segment of items, guarded by range
*
* \striped
*
* \tparam BLOCK_THREADS The thread block size in threads
* \tparam T <b>[inferred]</b> The data type to store.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator.
*/
template <
int BLOCK_THREADS,
typename T,
int ITEMS_PER_THREAD,
typename OutputIteratorT>
__device__ __forceinline__ void StoreDirectStriped(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + threadIdx.x</tt> for 2D thread blocks)
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
OutputIteratorT thread_itr = block_itr + linear_tid;
// Store directly in striped order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if ((ITEM * BLOCK_THREADS) + linear_tid < valid_items)
{
thread_itr[(ITEM * BLOCK_THREADS)] = items[ITEM];
}
}
}
//@} end member group
/******************************************************************//**
* \name Warp-striped arrangement I/O (direct)
*********************************************************************/
//@{
/**
* \brief Store a warp-striped arrangement of data across the thread block into a linear segment of items.
*
* \warpstriped
*
* \par Usage Considerations
* The number of threads in the thread block must be a multiple of the architecture's warp size.
*
* \tparam T <b>[inferred]</b> The data type to store.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator.
*/
template <
typename T,
int ITEMS_PER_THREAD,
typename OutputIteratorT>
__device__ __forceinline__ void StoreDirectWarpStriped(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + threadIdx.x</tt> for 2D thread blocks)
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
    T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1);
int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;
OutputIteratorT thread_itr = block_itr + warp_offset + tid;
// Store directly in warp-striped order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
thread_itr[(ITEM * CUB_PTX_WARP_THREADS)] = items[ITEM];
}
}
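// For example, with 32-thread warps and ITEMS_PER_THREAD = 4, warp 0 owns
// items [0, 128): its lane l writes block_itr[l], block_itr[l + 32],
// block_itr[l + 64], and block_itr[l + 96]; warp 1 begins at offset 128,
// and so on.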
/**
* \brief Store a warp-striped arrangement of data across the thread block into a linear segment of items, guarded by range
*
* \warpstriped
*
* \par Usage Considerations
* The number of threads in the thread block must be a multiple of the architecture's warp size.
*
* \tparam T <b>[inferred]</b> The data type to store.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator.
*/
template <
typename T,
int ITEMS_PER_THREAD,
typename OutputIteratorT>
__device__ __forceinline__ void StoreDirectWarpStriped(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + threadIdx.x</tt> for 2D thread blocks)
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1);
int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;
OutputIteratorT thread_itr = block_itr + warp_offset + tid;
// Store directly in warp-striped order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if (warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS) < valid_items)
{
thread_itr[(ITEM * CUB_PTX_WARP_THREADS)] = items[ITEM];
}
}
}
//@} end member group
/** @} */ // end group UtilIo
//-----------------------------------------------------------------------------
// Generic BlockStore abstraction
//-----------------------------------------------------------------------------
/**
* \brief cub::BlockStoreAlgorithm enumerates alternative algorithms for cub::BlockStore to write a blocked arrangement of items across a CUDA thread block to a linear segment of memory.
*/
enum BlockStoreAlgorithm
{
/**
* \par Overview
*
* A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is written
* directly to memory.
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) decreases as the
     *   access stride between threads increases (i.e., the number of items per thread).
*/
BLOCK_STORE_DIRECT,
/**
* \par Overview
*
* A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is written directly
* to memory using CUDA's built-in vectorized stores as a coalescing optimization.
* For example, <tt>st.global.v4.s32</tt> instructions will be generated
* when \p T = \p int and \p ITEMS_PER_THREAD % 4 == 0.
*
* \par Performance Considerations
     * - The utilization of memory transactions (coalescing) remains high until the
     *   access stride between threads (i.e., the number of items per thread) exceeds the
* maximum vector store width (typically 4 items or 64B, whichever is lower).
* - The following conditions will prevent vectorization and writing will fall back to cub::BLOCK_STORE_DIRECT:
* - \p ITEMS_PER_THREAD is odd
* - The \p OutputIteratorT is not a simple pointer type
* - The block output offset is not quadword-aligned
* - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.)
*/
BLOCK_STORE_VECTORIZE,
/**
* \par Overview
* A [<em>blocked arrangement</em>](index.html#sec5sec3) is locally
* transposed and then efficiently written to memory as a [<em>striped arrangement</em>](index.html#sec5sec3).
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) remains high regardless
* of items written per thread.
* - The local reordering incurs slightly longer latencies and throughput than the
* direct cub::BLOCK_STORE_DIRECT and cub::BLOCK_STORE_VECTORIZE alternatives.
*/
BLOCK_STORE_TRANSPOSE,
/**
* \par Overview
* A [<em>blocked arrangement</em>](index.html#sec5sec3) is locally
* transposed and then efficiently written to memory as a
* [<em>warp-striped arrangement</em>](index.html#sec5sec3)
*
* \par Usage Considerations
* - BLOCK_THREADS must be a multiple of WARP_THREADS
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) remains high regardless
* of items written per thread.
* - The local reordering incurs slightly longer latencies and throughput than the
* direct cub::BLOCK_STORE_DIRECT and cub::BLOCK_STORE_VECTORIZE alternatives.
*/
BLOCK_STORE_WARP_TRANSPOSE,
/**
* \par Overview
* A [<em>blocked arrangement</em>](index.html#sec5sec3) is locally
* transposed and then efficiently written to memory as a
* [<em>warp-striped arrangement</em>](index.html#sec5sec3)
* To reduce the shared memory requirement, only one warp's worth of shared
* memory is provisioned and is subsequently time-sliced among warps.
*
* \par Usage Considerations
* - BLOCK_THREADS must be a multiple of WARP_THREADS
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) remains high regardless
* of items written per thread.
* - Provisions less shared memory temporary storage, but incurs larger
* latencies than the BLOCK_STORE_WARP_TRANSPOSE alternative.
*/
BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED,
};
/**
* \brief The BlockStore class provides [<em>collective</em>](index.html#sec0) data movement methods for writing a [<em>blocked arrangement</em>](index.html#sec5sec3) of items partitioned across a CUDA thread block to a linear segment of memory. ![](block_store_logo.png)
* \ingroup BlockModule
* \ingroup UtilIo
*
* \tparam T The type of data to be written.
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam ITEMS_PER_THREAD The number of consecutive items partitioned onto each thread.
* \tparam ALGORITHM <b>[optional]</b> cub::BlockStoreAlgorithm tuning policy enumeration. default: cub::BLOCK_STORE_DIRECT.
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
* - The BlockStore class provides a single data movement abstraction that can be specialized
* to implement different cub::BlockStoreAlgorithm strategies. This facilitates different
* performance policies for different architectures, data types, granularity sizes, etc.
* - BlockStore can be optionally specialized by different data movement strategies:
* -# <b>cub::BLOCK_STORE_DIRECT</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is written
* directly to memory. [More...](\ref cub::BlockStoreAlgorithm)
* -# <b>cub::BLOCK_STORE_VECTORIZE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3)
* of data is written directly to memory using CUDA's built-in vectorized stores as a
* coalescing optimization. [More...](\ref cub::BlockStoreAlgorithm)
* -# <b>cub::BLOCK_STORE_TRANSPOSE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3)
* is locally transposed into a [<em>striped arrangement</em>](index.html#sec5sec3) which is
* then written to memory. [More...](\ref cub::BlockStoreAlgorithm)
* -# <b>cub::BLOCK_STORE_WARP_TRANSPOSE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3)
* is locally transposed into a [<em>warp-striped arrangement</em>](index.html#sec5sec3) which is
* then written to memory. [More...](\ref cub::BlockStoreAlgorithm)
* - \rowmajor
*
* \par A Simple Example
* \blockcollective{BlockStore}
* \par
* The code snippet below illustrates the storing of a "blocked" arrangement
* of 512 integers across 128 threads (where each thread owns 4 consecutive items)
* into a linear segment of memory. The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE,
* meaning items are locally reordered among threads so that memory references will be
* efficiently coalesced using a warp-striped access pattern.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_store.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
*
* // Allocate shared memory for BlockStore
* __shared__ typename BlockStore::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_data[4];
* ...
*
* // Store items to linear memory
* BlockStore(temp_storage).Store(d_data, thread_data);
*
* \endcode
* \par
* Suppose the set of \p thread_data across the block of threads is
* <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>.
* The output \p d_data will be <tt>0, 1, 2, 3, 4, 5, ...</tt>.
*
*/
template <
typename T,
int BLOCK_DIM_X,
int ITEMS_PER_THREAD,
BlockStoreAlgorithm ALGORITHM = BLOCK_STORE_DIRECT,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockStore
{
private:
/******************************************************************************
     * Constants and type definitions
******************************************************************************/
/// Constants
enum
{
/// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
};
/******************************************************************************
* Algorithmic variants
******************************************************************************/
/// Store helper
template <BlockStoreAlgorithm _POLICY, int DUMMY>
struct StoreInternal;
/**
* BLOCK_STORE_DIRECT specialization of store helper
*/
template <int DUMMY>
struct StoreInternal<BLOCK_STORE_DIRECT, DUMMY>
{
/// Shared memory storage layout type
typedef NullType TempStorage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ StoreInternal(
TempStorage &/*temp_storage*/,
int linear_tid)
:
linear_tid(linear_tid)
{}
/// Store items into a linear segment of memory
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
StoreDirectBlocked(linear_tid, block_itr, items);
}
/// Store items into a linear segment of memory, guarded by range
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
StoreDirectBlocked(linear_tid, block_itr, items, valid_items);
}
};
/**
* BLOCK_STORE_VECTORIZE specialization of store helper
*/
template <int DUMMY>
struct StoreInternal<BLOCK_STORE_VECTORIZE, DUMMY>
{
/// Shared memory storage layout type
typedef NullType TempStorage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ StoreInternal(
TempStorage &/*temp_storage*/,
int linear_tid)
:
linear_tid(linear_tid)
{}
/// Store items into a linear segment of memory, specialized for native pointer types (attempts vectorization)
__device__ __forceinline__ void Store(
T *block_ptr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
StoreDirectBlockedVectorized(linear_tid, block_ptr, items);
}
    /// Store items into a linear segment of memory, specialized for opaque output iterators (skips vectorization)
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
StoreDirectBlocked(linear_tid, block_itr, items);
}
/// Store items into a linear segment of memory, guarded by range
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
StoreDirectBlocked(linear_tid, block_itr, items, valid_items);
}
};
/**
* BLOCK_STORE_TRANSPOSE specialization of store helper
*/
template <int DUMMY>
struct StoreInternal<BLOCK_STORE_TRANSPOSE, DUMMY>
{
        // BlockExchange utility type for items
typedef BlockExchange<T, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;
/// Shared memory storage layout type
struct _TempStorage : BlockExchange::TempStorage
{
/// Temporary storage for partially-full block guard
volatile int valid_items;
};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ StoreInternal(
TempStorage &temp_storage,
int linear_tid)
:
temp_storage(temp_storage.Alias()),
linear_tid(linear_tid)
{}
/// Store items into a linear segment of memory
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
BlockExchange(temp_storage).BlockedToStriped(items);
StoreDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items);
}
/// Store items into a linear segment of memory, guarded by range
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
BlockExchange(temp_storage).BlockedToStriped(items);
if (linear_tid == 0)
temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads
CTA_SYNC();
StoreDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, temp_storage.valid_items);
}
};
/**
* BLOCK_STORE_WARP_TRANSPOSE specialization of store helper
*/
template <int DUMMY>
struct StoreInternal<BLOCK_STORE_WARP_TRANSPOSE, DUMMY>
{
enum
{
WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH)
};
// Assert BLOCK_THREADS must be a multiple of WARP_THREADS
CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS");
        // BlockExchange utility type for items
typedef BlockExchange<T, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;
/// Shared memory storage layout type
struct _TempStorage : BlockExchange::TempStorage
{
/// Temporary storage for partially-full block guard
volatile int valid_items;
};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ StoreInternal(
TempStorage &temp_storage,
int linear_tid)
:
temp_storage(temp_storage.Alias()),
linear_tid(linear_tid)
{}
/// Store items into a linear segment of memory
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
BlockExchange(temp_storage).BlockedToWarpStriped(items);
StoreDirectWarpStriped(linear_tid, block_itr, items);
}
/// Store items into a linear segment of memory, guarded by range
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
BlockExchange(temp_storage).BlockedToWarpStriped(items);
if (linear_tid == 0)
temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads
CTA_SYNC();
StoreDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items);
}
};
/**
* BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED specialization of store helper
*/
template <int DUMMY>
struct StoreInternal<BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED, DUMMY>
{
enum
{
WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH)
};
// Assert BLOCK_THREADS must be a multiple of WARP_THREADS
CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS");
        // BlockExchange utility type for items
typedef BlockExchange<T, BLOCK_DIM_X, ITEMS_PER_THREAD, true, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;
/// Shared memory storage layout type
struct _TempStorage : BlockExchange::TempStorage
{
/// Temporary storage for partially-full block guard
volatile int valid_items;
};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ StoreInternal(
TempStorage &temp_storage,
int linear_tid)
:
temp_storage(temp_storage.Alias()),
linear_tid(linear_tid)
{}
/// Store items into a linear segment of memory
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
BlockExchange(temp_storage).BlockedToWarpStriped(items);
StoreDirectWarpStriped(linear_tid, block_itr, items);
}
/// Store items into a linear segment of memory, guarded by range
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
BlockExchange(temp_storage).BlockedToWarpStriped(items);
if (linear_tid == 0)
temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads
CTA_SYNC();
StoreDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items);
}
};
/******************************************************************************
* Type definitions
******************************************************************************/
    /// Internal store implementation to use
typedef StoreInternal<ALGORITHM, 0> InternalStore;
/// Shared memory storage layout type
typedef typename InternalStore::TempStorage _TempStorage;
/******************************************************************************
* Utility methods
******************************************************************************/
/// Internal storage allocator
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/******************************************************************************
* Thread fields
******************************************************************************/
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
public:
/// \smemstorage{BlockStore}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockStore()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockStore(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Data movement
*********************************************************************/
//@{
/**
* \brief Store items into a linear segment of memory.
*
* \par
* - \blocked
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the storing of a "blocked" arrangement
* of 512 integers across 128 threads (where each thread owns 4 consecutive items)
* into a linear segment of memory. The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE,
* meaning items are locally reordered among threads so that memory references will be
* efficiently coalesced using a warp-striped access pattern.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_store.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
*
* // Allocate shared memory for BlockStore
* __shared__ typename BlockStore::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_data[4];
* ...
*
* // Store items to linear memory
* BlockStore(temp_storage).Store(d_data, thread_data);
*
* \endcode
* \par
* Suppose the set of \p thread_data across the block of threads is
* <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>.
* The output \p d_data will be <tt>0, 1, 2, 3, 4, 5, ...</tt>.
*
*/
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
InternalStore(temp_storage, linear_tid).Store(block_itr, items);
}
/**
* \brief Store items into a linear segment of memory, guarded by range.
*
* \par
* - \blocked
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the guarded storing of a "blocked" arrangement
* of 512 integers across 128 threads (where each thread owns 4 consecutive items)
* into a linear segment of memory. The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE,
* meaning items are locally reordered among threads so that memory references will be
* efficiently coalesced using a warp-striped access pattern.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_store.cuh>
*
* __global__ void ExampleKernel(int *d_data, int valid_items, ...)
* {
* // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
*
* // Allocate shared memory for BlockStore
* __shared__ typename BlockStore::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_data[4];
* ...
*
* // Store items to linear memory
* BlockStore(temp_storage).Store(d_data, thread_data, valid_items);
*
* \endcode
* \par
* Suppose the set of \p thread_data across the block of threads is
* <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt> and \p valid_items is \p 5.
* The output \p d_data will be <tt>0, 1, 2, 3, 4, ?, ?, ?, ...</tt>, with
* only the first two threads being unmasked to store portions of valid data.
*
*/
template <typename OutputIteratorT>
__device__ __forceinline__ void Store(
OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to
T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
int valid_items) ///< [in] Number of valid items to write
{
InternalStore(temp_storage, linear_tid).Store(block_itr, items, valid_items);
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_reduce.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* The cub::BlockReduce class provides [<em>collective</em>](index.html#sec0) methods for computing a parallel reduction of items partitioned across a CUDA thread block.
*/
#pragma once
#include "specializations/block_reduce_raking.cuh"
#include "specializations/block_reduce_raking_commutative_only.cuh"
#include "specializations/block_reduce_warp_reductions.cuh"
#include "../util_ptx.cuh"
#include "../util_type.cuh"
#include "../thread/thread_operators.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Algorithmic variants
******************************************************************************/
/**
* BlockReduceAlgorithm enumerates alternative algorithms for parallel
* reduction across a CUDA thread block.
*/
enum BlockReduceAlgorithm
{
/**
* \par Overview
* An efficient "raking" reduction algorithm that only supports commutative
* reduction operators (true for most operations, e.g., addition).
*
* \par
* Execution is comprised of three phases:
* -# Upsweep sequential reduction in registers (if threads contribute more
* than one input each). Threads in warps other than the first warp place
* their partial reductions into shared memory.
* -# Upsweep sequential reduction in shared memory. Threads within the first
* warp continue to accumulate by raking across segments of shared partial reductions
* -# A warp-synchronous Kogge-Stone style reduction within the raking warp.
*
* \par
* \image html block_reduce.png
* <div class="centercaption">\p BLOCK_REDUCE_RAKING data flow for a hypothetical 16-thread thread block and 4-thread raking warp.</div>
*
* \par Performance Considerations
 * - This variant performs less communication than BLOCK_REDUCE_RAKING
* and is preferable when the reduction operator is commutative. This variant
* applies fewer reduction operators than BLOCK_REDUCE_WARP_REDUCTIONS, and can provide higher overall
* throughput across the GPU when suitably occupied. However, turn-around latency may be
 *   higher than that of BLOCK_REDUCE_WARP_REDUCTIONS and thus less desirable
* when the GPU is under-occupied.
*/
BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY,
/**
* \par Overview
* An efficient "raking" reduction algorithm that supports commutative
* (e.g., addition) and non-commutative (e.g., string concatenation) reduction
* operators. \blocked.
*
* \par
* Execution is comprised of three phases:
* -# Upsweep sequential reduction in registers (if threads contribute more
* than one input each). Each thread then places the partial reduction
* of its item(s) into shared memory.
* -# Upsweep sequential reduction in shared memory. Threads within a
* single warp rake across segments of shared partial reductions.
* -# A warp-synchronous Kogge-Stone style reduction within the raking warp.
*
* \par
* \image html block_reduce.png
* <div class="centercaption">\p BLOCK_REDUCE_RAKING data flow for a hypothetical 16-thread thread block and 4-thread raking warp.</div>
*
* \par Performance Considerations
 * - This variant performs more communication than BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY
* and is only preferable when the reduction operator is non-commutative. This variant
* applies fewer reduction operators than BLOCK_REDUCE_WARP_REDUCTIONS, and can provide higher overall
* throughput across the GPU when suitably occupied. However, turn-around latency may be
 *   higher than that of BLOCK_REDUCE_WARP_REDUCTIONS and thus less desirable
* when the GPU is under-occupied.
*/
BLOCK_REDUCE_RAKING,
/**
* \par Overview
* A quick "tiled warp-reductions" reduction algorithm that supports commutative
* (e.g., addition) and non-commutative (e.g., string concatenation) reduction
* operators.
*
* \par
 * Execution is comprised of three phases:
* -# Upsweep sequential reduction in registers (if threads contribute more
* than one input each). Each thread then places the partial reduction
* of its item(s) into shared memory.
 * -# A shallow, but inefficient warp-synchronous Kogge-Stone style
 *    reduction within each warp.
* -# A propagation phase where the warp reduction outputs in each warp are
* updated with the aggregate from each preceding warp.
*
* \par
* \image html block_scan_warpscans.png
* <div class="centercaption">\p BLOCK_REDUCE_WARP_REDUCTIONS data flow for a hypothetical 16-thread thread block and 4-thread raking warp.</div>
*
* \par Performance Considerations
* - This variant applies more reduction operators than BLOCK_REDUCE_RAKING
 *   or BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, which may result in lower overall
* throughput across the GPU. However turn-around latency may be lower and
* thus useful when the GPU is under-occupied.
*/
BLOCK_REDUCE_WARP_REDUCTIONS,
};
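/*
 * Selecting among these variants is done through BlockReduce's ALGORITHM
 * template parameter.  A minimal sketch (the kernel name, block size, and
 * data layout are illustrative assumptions, not part of this header):
 *
 *     // Integer addition is commutative: prefer the commutative-only raking variant
 *     typedef cub::BlockReduce<int, 256, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY> BlockReduceT;
 *
 *     __global__ void BlockSumKernel(int *d_in, int *d_out)
 *     {
 *         __shared__ typename BlockReduceT::TempStorage temp_storage;
 *         int thread_data = d_in[(blockIdx.x * blockDim.x) + threadIdx.x];
 *         int block_aggregate = BlockReduceT(temp_storage).Sum(thread_data);
 *         if (threadIdx.x == 0)
 *             d_out[blockIdx.x] = block_aggregate;    // the aggregate is only valid in thread0
 *     }
 */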
/******************************************************************************
* Block reduce
******************************************************************************/
/**
* \brief The BlockReduce class provides [<em>collective</em>](index.html#sec0) methods for computing a parallel reduction of items partitioned across a CUDA thread block. ![](reduce_logo.png)
* \ingroup BlockModule
*
* \tparam T Data type being reduced
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam ALGORITHM <b>[optional]</b> cub::BlockReduceAlgorithm enumerator specifying the underlying algorithm to use (default: cub::BLOCK_REDUCE_WARP_REDUCTIONS)
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
* - A <a href="http://en.wikipedia.org/wiki/Reduce_(higher-order_function)"><em>reduction</em></a> (or <em>fold</em>)
* uses a binary combining operator to compute a single aggregate from a list of input elements.
* - \rowmajor
* - BlockReduce can be optionally specialized by algorithm to accommodate different latency/throughput workload profiles:
* -# <b>cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY</b>. An efficient "raking" reduction algorithm that only supports commutative reduction operators. [More...](\ref cub::BlockReduceAlgorithm)
* -# <b>cub::BLOCK_REDUCE_RAKING</b>. An efficient "raking" reduction algorithm that supports commutative and non-commutative reduction operators. [More...](\ref cub::BlockReduceAlgorithm)
* -# <b>cub::BLOCK_REDUCE_WARP_REDUCTIONS</b>. A quick "tiled warp-reductions" reduction algorithm that supports commutative and non-commutative reduction operators. [More...](\ref cub::BlockReduceAlgorithm)
*
* \par Performance Considerations
* - \granularity
* - Very efficient (only one synchronization barrier).
* - Incurs zero bank conflicts for most types
* - Computation is slightly more efficient (i.e., having lower instruction overhead) for:
* - Summation (<b><em>vs.</em></b> generic reduction)
* - \p BLOCK_THREADS is a multiple of the architecture's warp size
* - Every thread has a valid input (i.e., full <b><em>vs.</em></b> partial-tiles)
* - See cub::BlockReduceAlgorithm for performance details regarding algorithmic alternatives
*
* \par A Simple Example
* \blockcollective{BlockReduce}
* \par
* The code snippet below illustrates a sum reduction of 512 integer items that
* are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_reduce.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockReduce for a 1D block of 128 threads on type int
* typedef cub::BlockReduce<int, 128> BlockReduce;
*
* // Allocate shared memory for BlockReduce
* __shared__ typename BlockReduce::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_data[4];
* ...
*
* // Compute the block-wide sum for thread0
* int aggregate = BlockReduce(temp_storage).Sum(thread_data);
*
* \endcode
*
*/
template <
typename T,
int BLOCK_DIM_X,
BlockReduceAlgorithm ALGORITHM = BLOCK_REDUCE_WARP_REDUCTIONS,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockReduce
{
private:
/******************************************************************************
* Constants and type definitions
******************************************************************************/
/// Constants
enum
{
/// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
};
typedef BlockReduceWarpReductions<T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> WarpReductions;
typedef BlockReduceRakingCommutativeOnly<T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> RakingCommutativeOnly;
typedef BlockReduceRaking<T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> Raking;
/// Internal specialization type
typedef typename If<(ALGORITHM == BLOCK_REDUCE_WARP_REDUCTIONS),
WarpReductions,
typename If<(ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY),
RakingCommutativeOnly,
Raking>::Type>::Type InternalBlockReduce; // BlockReduceRaking
/// Shared memory storage layout type for BlockReduce
typedef typename InternalBlockReduce::TempStorage _TempStorage;
/******************************************************************************
* Utility methods
******************************************************************************/
/// Internal storage allocator
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
public:
/// \smemstorage{BlockReduce}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockReduce()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockReduce(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Generic reductions
*********************************************************************/
//@{
/**
* \brief Computes a block-wide reduction for thread<sub>0</sub> using the specified binary reduction functor. Each thread contributes one input element.
*
* \par
* - The return value is undefined in threads other than thread<sub>0</sub>.
* - \rowmajor
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a max reduction of 128 integer items that
* are partitioned across 128 threads.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_reduce.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockReduce for a 1D block of 128 threads on type int
* typedef cub::BlockReduce<int, 128> BlockReduce;
*
* // Allocate shared memory for BlockReduce
* __shared__ typename BlockReduce::TempStorage temp_storage;
*
* // Each thread obtains an input item
* int thread_data;
* ...
*
* // Compute the block-wide max for thread0
* int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cub::Max());
*
* \endcode
*
* \tparam ReductionOp <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt>
*/
template <typename ReductionOp>
__device__ __forceinline__ T Reduce(
T input, ///< [in] Calling thread's input
ReductionOp reduction_op) ///< [in] Binary reduction functor
{
return InternalBlockReduce(temp_storage).template Reduce<true>(input, BLOCK_THREADS, reduction_op);
}
/**
* \brief Computes a block-wide reduction for thread<sub>0</sub> using the specified binary reduction functor. Each thread contributes an array of consecutive input elements.
*
* \par
* - The return value is undefined in threads other than thread<sub>0</sub>.
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a max reduction of 512 integer items that
* are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_reduce.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockReduce for a 1D block of 128 threads on type int
* typedef cub::BlockReduce<int, 128> BlockReduce;
*
* // Allocate shared memory for BlockReduce
* __shared__ typename BlockReduce::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_data[4];
* ...
*
* // Compute the block-wide max for thread0
* int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cub::Max());
*
* \endcode
*
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam ReductionOp <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt>
*/
template <
int ITEMS_PER_THREAD,
typename ReductionOp>
__device__ __forceinline__ T Reduce(
T (&inputs)[ITEMS_PER_THREAD], ///< [in] Calling thread's input segment
ReductionOp reduction_op) ///< [in] Binary reduction functor
{
// Reduce partials
T partial = internal::ThreadReduce(inputs, reduction_op);
return Reduce(partial, reduction_op);
}
/**
* \brief Computes a block-wide reduction for thread<sub>0</sub> using the specified binary reduction functor. The first \p num_valid threads each contribute one input element.
*
* \par
* - The return value is undefined in threads other than thread<sub>0</sub>.
* - \rowmajor
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a max reduction of a partially-full tile of integer items that
* are partitioned across 128 threads.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_reduce.cuh>
*
* __global__ void ExampleKernel(int num_valid, ...)
* {
* // Specialize BlockReduce for a 1D block of 128 threads on type int
* typedef cub::BlockReduce<int, 128> BlockReduce;
*
* // Allocate shared memory for BlockReduce
* __shared__ typename BlockReduce::TempStorage temp_storage;
*
* // Each thread obtains an input item
* int thread_data;
* if (threadIdx.x < num_valid) thread_data = ...
*
* // Compute the block-wide max for thread0
* int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cub::Max(), num_valid);
*
* \endcode
*
* \tparam ReductionOp <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt>
*/
template <typename ReductionOp>
__device__ __forceinline__ T Reduce(
T input, ///< [in] Calling thread's input
ReductionOp reduction_op, ///< [in] Binary reduction functor
int num_valid) ///< [in] Number of threads containing valid elements (may be less than BLOCK_THREADS)
{
        // Determine if we can skip bounds checking
if (num_valid >= BLOCK_THREADS)
{
return InternalBlockReduce(temp_storage).template Reduce<true>(input, num_valid, reduction_op);
}
else
{
return InternalBlockReduce(temp_storage).template Reduce<false>(input, num_valid, reduction_op);
}
}
//@} end member group
/******************************************************************//**
* \name Summation reductions
*********************************************************************/
//@{
/**
* \brief Computes a block-wide reduction for thread<sub>0</sub> using addition (+) as the reduction operator. Each thread contributes one input element.
*
* \par
* - The return value is undefined in threads other than thread<sub>0</sub>.
* - \rowmajor
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sum reduction of 128 integer items that
* are partitioned across 128 threads.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_reduce.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockReduce for a 1D block of 128 threads on type int
* typedef cub::BlockReduce<int, 128> BlockReduce;
*
* // Allocate shared memory for BlockReduce
* __shared__ typename BlockReduce::TempStorage temp_storage;
*
* // Each thread obtains an input item
* int thread_data;
* ...
*
* // Compute the block-wide sum for thread0
* int aggregate = BlockReduce(temp_storage).Sum(thread_data);
*
* \endcode
*
*/
__device__ __forceinline__ T Sum(
T input) ///< [in] Calling thread's input
{
return InternalBlockReduce(temp_storage).template Sum<true>(input, BLOCK_THREADS);
}
/**
* \brief Computes a block-wide reduction for thread<sub>0</sub> using addition (+) as the reduction operator. Each thread contributes an array of consecutive input elements.
*
* \par
* - The return value is undefined in threads other than thread<sub>0</sub>.
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sum reduction of 512 integer items that
* are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_reduce.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockReduce for a 1D block of 128 threads on type int
* typedef cub::BlockReduce<int, 128> BlockReduce;
*
* // Allocate shared memory for BlockReduce
* __shared__ typename BlockReduce::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_data[4];
* ...
*
* // Compute the block-wide sum for thread0
* int aggregate = BlockReduce(temp_storage).Sum(thread_data);
*
* \endcode
*
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
*/
template <int ITEMS_PER_THREAD>
__device__ __forceinline__ T Sum(
T (&inputs)[ITEMS_PER_THREAD]) ///< [in] Calling thread's input segment
{
// Reduce partials
T partial = internal::ThreadReduce(inputs, cub::Sum());
return Sum(partial);
}
/**
* \brief Computes a block-wide reduction for thread<sub>0</sub> using addition (+) as the reduction operator. The first \p num_valid threads each contribute one input element.
*
* \par
* - The return value is undefined in threads other than thread<sub>0</sub>.
* - \rowmajor
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sum reduction of a partially-full tile of integer items that
* are partitioned across 128 threads.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_reduce.cuh>
*
* __global__ void ExampleKernel(int num_valid, ...)
* {
* // Specialize BlockReduce for a 1D block of 128 threads on type int
* typedef cub::BlockReduce<int, 128> BlockReduce;
*
* // Allocate shared memory for BlockReduce
* __shared__ typename BlockReduce::TempStorage temp_storage;
*
* // Each thread obtains an input item (up to num_items)
* int thread_data;
* if (threadIdx.x < num_valid)
* thread_data = ...
*
* // Compute the block-wide sum for thread0
* int aggregate = BlockReduce(temp_storage).Sum(thread_data, num_valid);
*
* \endcode
*
*/
__device__ __forceinline__ T Sum(
T input, ///< [in] Calling thread's input
int num_valid) ///< [in] Number of threads containing valid elements (may be less than BLOCK_THREADS)
{
        // Determine if we can skip bounds checking
if (num_valid >= BLOCK_THREADS)
{
return InternalBlockReduce(temp_storage).template Sum<true>(input, num_valid);
}
else
{
return InternalBlockReduce(temp_storage).template Sum<false>(input, num_valid);
}
}
//@} end member group
};
/**
* \example example_block_reduce.cu
*/
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_radix_rank.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::BlockRadixRank provides operations for ranking unsigned integer types within a CUDA thread block
*/
#pragma once
#include <stdint.h>
#include "../thread/thread_reduce.cuh"
#include "../thread/thread_scan.cuh"
#include "../block/block_scan.cuh"
#include "../util_ptx.cuh"
#include "../util_arch.cuh"
#include "../util_type.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \brief BlockRadixRank provides operations for ranking unsigned integer types within a CUDA thread block.
* \ingroup BlockModule
*
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam RADIX_BITS The number of radix bits per digit place
* \tparam IS_DESCENDING Whether or not the sorted-order is high-to-low
* \tparam MEMOIZE_OUTER_SCAN <b>[optional]</b> Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure (default: true for architectures SM35 and newer, false otherwise). See BlockScanAlgorithm::BLOCK_SCAN_RAKING_MEMOIZE for more details.
* \tparam INNER_SCAN_ALGORITHM <b>[optional]</b> The cub::BlockScanAlgorithm algorithm to use (default: cub::BLOCK_SCAN_WARP_SCANS)
* \tparam SMEM_CONFIG <b>[optional]</b> Shared memory bank mode (default: \p cudaSharedMemBankSizeFourByte)
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
 * For each key supplied by the thread block, BlockRadixRank computes the key's local rank within the tile, ordered on the digit extracted at the current bit position.
* - Keys must be in a form suitable for radix ranking (i.e., unsigned bits).
* - \blocked
*
* \par Performance Considerations
* - \granularity
*
* \par Examples
* \par
 * - <b>Example 1:</b> Simple radix rank of 32-bit integer keys (a minimal
 *   sketch; the 5-bit digit size and two keys per thread are illustrative)
 *      \code
 *      #include <cub/cub.cuh>
 *
 *      template <int BLOCK_THREADS>
 *      __global__ void ExampleKernel(...)
 *      {
 *          // Specialize and instantiate BlockRadixRank for BLOCK_THREADS threads and 5-bit digits
 *          typedef cub::BlockRadixRank<BLOCK_THREADS, 5, false> BlockRadixRank;
 *          __shared__ typename BlockRadixRank::TempStorage temp_storage;
 *
 *          // Each thread contributes two keys, ranked on the least-significant digit
 *          unsigned int keys[2];
 *          int ranks[2];
 *          ...
 *          BlockRadixRank(temp_storage).RankKeys(keys, ranks, 0, 5);
 *      }
 *      \endcode
*/
template <
int BLOCK_DIM_X,
int RADIX_BITS,
bool IS_DESCENDING,
bool MEMOIZE_OUTER_SCAN = (CUB_PTX_ARCH >= 350) ? true : false,
BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS,
cudaSharedMemConfig SMEM_CONFIG = cudaSharedMemBankSizeFourByte,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockRadixRank
{
private:
/******************************************************************************
* Type definitions and constants
******************************************************************************/
// Integer type for digit counters (to be packed into words of type PackedCounters)
typedef unsigned short DigitCounter;
// Integer type for packing DigitCounters into columns of shared memory banks
typedef typename If<(SMEM_CONFIG == cudaSharedMemBankSizeEightByte),
unsigned long long,
unsigned int>::Type PackedCounter;
enum
{
// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
RADIX_DIGITS = 1 << RADIX_BITS,
LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH),
WARP_THREADS = 1 << LOG_WARP_THREADS,
WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,
BYTES_PER_COUNTER = sizeof(DigitCounter),
LOG_BYTES_PER_COUNTER = Log2<BYTES_PER_COUNTER>::VALUE,
PACKING_RATIO = sizeof(PackedCounter) / sizeof(DigitCounter),
LOG_PACKING_RATIO = Log2<PACKING_RATIO>::VALUE,
LOG_COUNTER_LANES = CUB_MAX((RADIX_BITS - LOG_PACKING_RATIO), 0), // Always at least one lane
COUNTER_LANES = 1 << LOG_COUNTER_LANES,
// The number of packed counters per thread (plus one for padding)
PADDED_COUNTER_LANES = COUNTER_LANES + 1,
RAKING_SEGMENT = PADDED_COUNTER_LANES,
};
public:
enum
{
/// Number of bin-starting offsets tracked per thread
BINS_TRACKED_PER_THREAD = CUB_MAX(1, RADIX_DIGITS / BLOCK_THREADS),
};
private:
/// BlockScan type
typedef BlockScan<
PackedCounter,
BLOCK_DIM_X,
INNER_SCAN_ALGORITHM,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
PTX_ARCH>
BlockScan;
/// Shared memory storage layout type for BlockRadixRank
struct __align__(16) _TempStorage
{
union Aliasable
{
DigitCounter digit_counters[PADDED_COUNTER_LANES][BLOCK_THREADS][PACKING_RATIO];
PackedCounter raking_grid[BLOCK_THREADS][RAKING_SEGMENT];
} aliasable;
// Storage for scanning local ranks
typename BlockScan::TempStorage block_scan;
};
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
/// Copy of raking segment, promoted to registers
PackedCounter cached_segment[RAKING_SEGMENT];
/******************************************************************************
* Utility methods
******************************************************************************/
/**
* Internal storage allocator
*/
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/**
* Performs upsweep raking reduction, returning the aggregate
*/
__device__ __forceinline__ PackedCounter Upsweep()
{
PackedCounter *smem_raking_ptr = temp_storage.aliasable.raking_grid[linear_tid];
PackedCounter *raking_ptr;
if (MEMOIZE_OUTER_SCAN)
{
// Copy data into registers
#pragma unroll
for (int i = 0; i < RAKING_SEGMENT; i++)
{
cached_segment[i] = smem_raking_ptr[i];
}
raking_ptr = cached_segment;
}
else
{
raking_ptr = smem_raking_ptr;
}
return internal::ThreadReduce<RAKING_SEGMENT>(raking_ptr, Sum());
}
/// Performs exclusive downsweep raking scan
__device__ __forceinline__ void ExclusiveDownsweep(
PackedCounter raking_partial)
{
PackedCounter *smem_raking_ptr = temp_storage.aliasable.raking_grid[linear_tid];
PackedCounter *raking_ptr = (MEMOIZE_OUTER_SCAN) ?
cached_segment :
smem_raking_ptr;
// Exclusive raking downsweep scan
internal::ThreadScanExclusive<RAKING_SEGMENT>(raking_ptr, raking_ptr, Sum(), raking_partial);
if (MEMOIZE_OUTER_SCAN)
{
// Copy data back to smem
#pragma unroll
for (int i = 0; i < RAKING_SEGMENT; i++)
{
smem_raking_ptr[i] = cached_segment[i];
}
}
}
/**
* Reset shared memory digit counters
*/
__device__ __forceinline__ void ResetCounters()
{
// Reset shared memory digit counters
#pragma unroll
for (int LANE = 0; LANE < PADDED_COUNTER_LANES; LANE++)
{
*((PackedCounter*) temp_storage.aliasable.digit_counters[LANE][linear_tid]) = 0;
}
}
/**
* Block-scan prefix callback
*/
struct PrefixCallBack
{
__device__ __forceinline__ PackedCounter operator()(PackedCounter block_aggregate)
{
PackedCounter block_prefix = 0;
// Propagate totals in packed fields
#pragma unroll
for (int PACKED = 1; PACKED < PACKING_RATIO; PACKED++)
{
block_prefix += block_aggregate << (sizeof(DigitCounter) * 8 * PACKED);
}
return block_prefix;
}
};
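    /*
     * A worked example of the callback above, assuming PACKING_RATIO == 2 with
     * 16-bit DigitCounters packed into 32-bit PackedCounters: if the block-wide
     * aggregate packs the counts (hi, lo), the callback returns
     * block_aggregate << 16, i.e., the prefix (lo, 0).  Seeding the exclusive
     * scan with this prefix makes every upper sub-counter's prefix additionally
     * include the block-wide total of the lower sub-counter, which is exactly
     * the cross-column carry the packed layout requires.
     */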
/**
* Scan shared memory digit counters.
*/
__device__ __forceinline__ void ScanCounters()
{
// Upsweep scan
PackedCounter raking_partial = Upsweep();
// Compute exclusive sum
PackedCounter exclusive_partial;
PrefixCallBack prefix_call_back;
BlockScan(temp_storage.block_scan).ExclusiveSum(raking_partial, exclusive_partial, prefix_call_back);
// Downsweep scan with exclusive partial
ExclusiveDownsweep(exclusive_partial);
}
public:
    /// \smemstorage{BlockRadixRank}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockRadixRank()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockRadixRank(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Raking
*********************************************************************/
//@{
/**
* \brief Rank keys.
*/
template <
typename UnsignedBits,
int KEYS_PER_THREAD>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile
int current_bit, ///< [in] The least-significant bit position of the current digit to extract
int num_bits) ///< [in] The number of bits in the current digit
{
DigitCounter thread_prefixes[KEYS_PER_THREAD]; // For each key, the count of previous keys in this tile having the same digit
DigitCounter* digit_counters[KEYS_PER_THREAD]; // For each key, the byte-offset of its corresponding digit counter in smem
// Reset shared memory digit counters
ResetCounters();
#pragma unroll
for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM)
{
// Get digit
unsigned int digit = BFE(keys[ITEM], current_bit, num_bits);
// Get sub-counter
unsigned int sub_counter = digit >> LOG_COUNTER_LANES;
// Get counter lane
unsigned int counter_lane = digit & (COUNTER_LANES - 1);
if (IS_DESCENDING)
{
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
// Pointer to smem digit counter
digit_counters[ITEM] = &temp_storage.aliasable.digit_counters[counter_lane][linear_tid][sub_counter];
// Load thread-exclusive prefix
thread_prefixes[ITEM] = *digit_counters[ITEM];
// Store inclusive prefix
*digit_counters[ITEM] = thread_prefixes[ITEM] + 1;
}
CTA_SYNC();
// Scan shared memory counters
ScanCounters();
CTA_SYNC();
// Extract the local ranks of each key
for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM)
{
// Add in thread block exclusive prefix
ranks[ITEM] = thread_prefixes[ITEM] + *digit_counters[ITEM];
}
}
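    /*
     * A worked decomposition for the ranking loop above, assuming
     * RADIX_BITS == 4 and PACKING_RATIO == 2 (so LOG_COUNTER_LANES == 3 and
     * COUNTER_LANES == 8): digit 11 (binary 1011) selects
     * sub_counter = 11 >> 3 = 1 and counter_lane = 11 & 7 = 3, i.e., the upper
     * 16-bit half of packed word 3 in the calling thread's counter column.
     */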
/**
* \brief Rank keys. For the lower \p RADIX_DIGITS threads, digit counts for each digit are provided for the corresponding thread.
*/
template <
typename UnsignedBits,
int KEYS_PER_THREAD>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile (out parameter)
int current_bit, ///< [in] The least-significant bit position of the current digit to extract
int num_bits, ///< [in] The number of bits in the current digit
int (&exclusive_digit_prefix)[BINS_TRACKED_PER_THREAD]) ///< [out] The exclusive prefix sum for the digits [(threadIdx.x * BINS_TRACKED_PER_THREAD) ... (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1]
{
// Rank keys
RankKeys(keys, ranks, current_bit, num_bits);
// Get the inclusive and exclusive digit totals corresponding to the calling thread.
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (linear_tid * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
if (IS_DESCENDING)
bin_idx = RADIX_DIGITS - bin_idx - 1;
// Obtain ex/inclusive digit counts. (Unfortunately these all reside in the
// first counter column, resulting in unavoidable bank conflicts.)
unsigned int counter_lane = (bin_idx & (COUNTER_LANES - 1));
unsigned int sub_counter = bin_idx >> (LOG_COUNTER_LANES);
exclusive_digit_prefix[track] = temp_storage.aliasable.digit_counters[counter_lane][0][sub_counter];
}
}
}
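    /*
     * For example, with RADIX_BITS == 4 (16 digits) and a 128-thread block,
     * BINS_TRACKED_PER_THREAD is CUB_MAX(1, 16 / 128) == 1, so only threads
     * 0..15 receive a meaningful exclusive_digit_prefix[0]; the bin_idx guard
     * above leaves the remaining threads' outputs unwritten.
     */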
};
/**
* Radix-rank using match.any
*/
template <
int BLOCK_DIM_X,
int RADIX_BITS,
bool IS_DESCENDING,
BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockRadixRankMatch
{
private:
/******************************************************************************
* Type definitions and constants
******************************************************************************/
typedef int32_t RankT;
typedef int32_t DigitCounterT;
enum
{
// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
RADIX_DIGITS = 1 << RADIX_BITS,
LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH),
WARP_THREADS = 1 << LOG_WARP_THREADS,
WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,
PADDED_WARPS = ((WARPS & 0x1) == 0) ?
WARPS + 1 :
WARPS,
COUNTERS = PADDED_WARPS * RADIX_DIGITS,
RAKING_SEGMENT = (COUNTERS + BLOCK_THREADS - 1) / BLOCK_THREADS,
PADDED_RAKING_SEGMENT = ((RAKING_SEGMENT & 0x1) == 0) ?
RAKING_SEGMENT + 1 :
RAKING_SEGMENT,
};
public:
enum
{
/// Number of bin-starting offsets tracked per thread
BINS_TRACKED_PER_THREAD = CUB_MAX(1, RADIX_DIGITS / BLOCK_THREADS),
};
private:
/// BlockScan type
typedef BlockScan<
DigitCounterT,
BLOCK_THREADS,
INNER_SCAN_ALGORITHM,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
PTX_ARCH>
BlockScanT;
/// Shared memory storage layout type for BlockRadixRank
struct __align__(16) _TempStorage
{
typename BlockScanT::TempStorage block_scan;
union __align__(16) Aliasable
{
volatile DigitCounterT warp_digit_counters[RADIX_DIGITS][PADDED_WARPS];
DigitCounterT raking_grid[BLOCK_THREADS][PADDED_RAKING_SEGMENT];
} aliasable;
};
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
public:
    /// \smemstorage{BlockRadixRankMatch}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockRadixRankMatch(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Raking
*********************************************************************/
//@{
/**
* \brief Rank keys.
*/
template <
typename UnsignedBits,
int KEYS_PER_THREAD>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile
int current_bit, ///< [in] The least-significant bit position of the current digit to extract
int num_bits) ///< [in] The number of bits in the current digit
{
// Initialize shared digit counters
#pragma unroll
for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM)
temp_storage.aliasable.raking_grid[linear_tid][ITEM] = 0;
CTA_SYNC();
// Each warp will strip-mine its section of input, one strip at a time
volatile DigitCounterT *digit_counters[KEYS_PER_THREAD];
uint32_t lane_id = LaneId();
uint32_t warp_id = linear_tid >> LOG_WARP_THREADS;
uint32_t lane_mask_lt = LaneMaskLt();
#pragma unroll
for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM)
{
// My digit
uint32_t digit = BFE(keys[ITEM], current_bit, num_bits);
if (IS_DESCENDING)
digit = RADIX_DIGITS - digit - 1;
// Mask of peers who have same digit as me
uint32_t peer_mask = MatchAny<RADIX_BITS>(digit);
// Pointer to smem digit counter for this key
digit_counters[ITEM] = &temp_storage.aliasable.warp_digit_counters[digit][warp_id];
// Number of occurrences in previous strips
DigitCounterT warp_digit_prefix = *digit_counters[ITEM];
// Warp-sync
WARP_SYNC(0xFFFFFFFF);
// Number of peers having same digit as me
int32_t digit_count = __popc(peer_mask);
// Number of lower-ranked peers having same digit seen so far
int32_t peer_digit_prefix = __popc(peer_mask & lane_mask_lt);
if (peer_digit_prefix == 0)
{
// First thread for each digit updates the shared warp counter
*digit_counters[ITEM] = DigitCounterT(warp_digit_prefix + digit_count);
}
// Warp-sync
WARP_SYNC(0xFFFFFFFF);
// Number of prior keys having same digit
ranks[ITEM] = warp_digit_prefix + DigitCounterT(peer_digit_prefix);
}
CTA_SYNC();
// Scan warp counters
DigitCounterT scan_counters[PADDED_RAKING_SEGMENT];
#pragma unroll
for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM)
scan_counters[ITEM] = temp_storage.aliasable.raking_grid[linear_tid][ITEM];
BlockScanT(temp_storage.block_scan).ExclusiveSum(scan_counters, scan_counters);
#pragma unroll
for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM)
temp_storage.aliasable.raking_grid[linear_tid][ITEM] = scan_counters[ITEM];
CTA_SYNC();
// Seed ranks with counter values from previous warps
#pragma unroll
for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM)
ranks[ITEM] += *digit_counters[ITEM];
}
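    /*
     * A worked view of the peer-matching above: if lanes 3, 9, and 20 of a warp
     * hold the same digit, MatchAny gives each of them a peer_mask with bits
     * {3, 9, 20} set.  Lane 9 then computes digit_count = __popc(peer_mask) = 3
     * and peer_digit_prefix = __popc(peer_mask & lane_mask_lt) = 1 (only lane 3
     * precedes it), while lane 3 alone advances the shared warp counter by 3.
     */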
/**
* \brief Rank keys. For the lower \p RADIX_DIGITS threads, digit counts for each digit are provided for the corresponding thread.
*/
template <
typename UnsignedBits,
int KEYS_PER_THREAD>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile (out parameter)
int current_bit, ///< [in] The least-significant bit position of the current digit to extract
int num_bits, ///< [in] The number of bits in the current digit
int (&exclusive_digit_prefix)[BINS_TRACKED_PER_THREAD]) ///< [out] The exclusive prefix sum for the digits [(threadIdx.x * BINS_TRACKED_PER_THREAD) ... (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1]
{
RankKeys(keys, ranks, current_bit, num_bits);
// Get exclusive count for each digit
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (linear_tid * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
if (IS_DESCENDING)
bin_idx = RADIX_DIGITS - bin_idx - 1;
exclusive_digit_prefix[track] = temp_storage.aliasable.warp_digit_counters[bin_idx][0];
}
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_raking_layout.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* cub::BlockRakingLayout provides a conflict-free shared memory layout abstraction for warp-raking across thread block data.
*/
#pragma once
#include "../util_macro.cuh"
#include "../util_arch.cuh"
#include "../util_type.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \brief BlockRakingLayout provides a conflict-free shared memory layout abstraction for 1D raking across thread block data. ![](raking.png)
* \ingroup BlockModule
*
* \par Overview
* This type facilitates a shared memory usage pattern where a block of CUDA
* threads places elements into shared memory and then reduces the active
* parallelism to one "raking" warp of threads for serially aggregating consecutive
* sequences of shared items. Padding is inserted to eliminate bank conflicts
* (for most data types).
*
* \tparam T The data type to be exchanged.
* \tparam BLOCK_THREADS The thread block size in threads.
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*/
template <
typename T,
int BLOCK_THREADS,
int PTX_ARCH = CUB_PTX_ARCH>
struct BlockRakingLayout
{
//---------------------------------------------------------------------
// Constants and type definitions
//---------------------------------------------------------------------
enum
{
/// The total number of elements that need to be cooperatively reduced
SHARED_ELEMENTS = BLOCK_THREADS,
/// Maximum number of warp-synchronous raking threads
MAX_RAKING_THREADS = CUB_MIN(BLOCK_THREADS, CUB_WARP_THREADS(PTX_ARCH)),
/// Number of raking elements per warp-synchronous raking thread (rounded up)
SEGMENT_LENGTH = (SHARED_ELEMENTS + MAX_RAKING_THREADS - 1) / MAX_RAKING_THREADS,
/// Never use a raking thread that will have no valid data (e.g., when BLOCK_THREADS is 62 and SEGMENT_LENGTH is 2, we should only use 31 raking threads)
RAKING_THREADS = (SHARED_ELEMENTS + SEGMENT_LENGTH - 1) / SEGMENT_LENGTH,
/// Whether we will have bank conflicts (technically we should find out if the GCD is > 1)
HAS_CONFLICTS = (CUB_SMEM_BANKS(PTX_ARCH) % SEGMENT_LENGTH == 0),
/// Degree of bank conflicts (e.g., 4-way)
CONFLICT_DEGREE = (HAS_CONFLICTS) ?
(MAX_RAKING_THREADS * SEGMENT_LENGTH) / CUB_SMEM_BANKS(PTX_ARCH) :
1,
/// Pad each segment length with one element if segment length is not relatively prime to warp size and can't be optimized as a vector load
USE_SEGMENT_PADDING = ((SEGMENT_LENGTH & 1) == 0) && (SEGMENT_LENGTH > 2),
/// Total number of elements in the raking grid
GRID_ELEMENTS = RAKING_THREADS * (SEGMENT_LENGTH + USE_SEGMENT_PADDING),
/// Whether or not we need bounds checking during raking (the number of reduction elements is not a multiple of the number of raking threads)
UNGUARDED = (SHARED_ELEMENTS % RAKING_THREADS == 0),
};
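    /*
     * A worked instantiation of the constants above, assuming T == int,
     * BLOCK_THREADS == 128, and 32 shared memory banks: MAX_RAKING_THREADS == 32,
     * SEGMENT_LENGTH == 4, RAKING_THREADS == 32, USE_SEGMENT_PADDING == 1 (each
     * 4-element segment is padded by one element to stagger segments across
     * banks), and GRID_ELEMENTS == 32 * (4 + 1) == 160 padded slots.
     */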
/**
* \brief Shared memory storage type
*/
struct __align__(16) _TempStorage
{
T buff[BlockRakingLayout::GRID_ELEMENTS];
};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
/**
* \brief Returns the location for the calling thread to place data into the grid
*/
static __device__ __forceinline__ T* PlacementPtr(
TempStorage &temp_storage,
unsigned int linear_tid)
{
// Offset for partial
unsigned int offset = linear_tid;
// Add in one padding element for every segment
if (USE_SEGMENT_PADDING > 0)
{
offset += offset / SEGMENT_LENGTH;
}
// Incorporating a block of padding partials every shared memory segment
return temp_storage.Alias().buff + offset;
}
/**
* \brief Returns the location for the calling thread to begin sequential raking
*/
static __device__ __forceinline__ T* RakingPtr(
TempStorage &temp_storage,
unsigned int linear_tid)
{
return temp_storage.Alias().buff + (linear_tid * (SEGMENT_LENGTH + USE_SEGMENT_PADDING));
}
};
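/*
 * A minimal usage sketch of the two pointer helpers (the kernel shape and the
 * use of integer addition are illustrative assumptions):
 *
 *     typedef cub::BlockRakingLayout<int, 128> RakingLayout;
 *     __shared__ typename RakingLayout::TempStorage temp_storage;
 *
 *     // Every thread deposits its partial into the padded grid
 *     *RakingLayout::PlacementPtr(temp_storage, threadIdx.x) = thread_partial;
 *     __syncthreads();
 *
 *     // Threads of the raking warp each serially reduce one segment
 *     if (threadIdx.x < RakingLayout::RAKING_THREADS)
 *     {
 *         int *raking_ptr = RakingLayout::RakingPtr(temp_storage, threadIdx.x);
 *         int partial = raking_ptr[0];
 *         #pragma unroll
 *         for (int i = 1; i < RakingLayout::SEGMENT_LENGTH; ++i)
 *             partial += raking_ptr[i];
 *         ...
 *     }
 */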
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_adjacent_difference.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
 * The cub::BlockAdjacentDifference class provides [<em>collective</em>](index.html#sec0) methods for applying a binary flag operator to adjacent items partitioned across a CUDA thread block.
*/
#pragma once
#include "../util_type.cuh"
#include "../util_ptx.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
template <
typename T,
int BLOCK_DIM_X,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockAdjacentDifference
{
private:
/******************************************************************************
* Constants and type definitions
******************************************************************************/
/// Constants
enum
{
/// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
};
/// Shared memory storage layout type (last element from each thread's input)
struct _TempStorage
{
T first_items[BLOCK_THREADS];
T last_items[BLOCK_THREADS];
};
/******************************************************************************
* Utility methods
******************************************************************************/
/// Internal storage allocator
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/// Specialization for when FlagOp has third index param
template <typename FlagOp, bool HAS_PARAM = BinaryOpHasIdxParam<T, FlagOp>::HAS_PARAM>
struct ApplyOp
{
// Apply flag operator
static __device__ __forceinline__ T FlagT(FlagOp flag_op, const T &a, const T &b, int idx)
{
return flag_op(b, a, idx);
}
};
/// Specialization for when FlagOp does not have a third index param
template <typename FlagOp>
struct ApplyOp<FlagOp, false>
{
// Apply flag operator
static __device__ __forceinline__ T FlagT(FlagOp flag_op, const T &a, const T &b, int /*idx*/)
{
return flag_op(b, a);
}
};
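    /*
     * The two flag-operator forms this dispatch supports, as a sketch (the
     * functor names are illustrative assumptions):
     *
     *     struct NotEqual               // two-argument form: flag_op(current, predecessor)
     *     {
     *         __device__ __forceinline__ bool operator()(const int &b, const int &a)
     *         {
     *             return (b != a);
     *         }
     *     };
     *
     *     struct NotEqualAt             // three-argument form also receives the item's tile index
     *     {
     *         __device__ __forceinline__ bool operator()(const int &b, const int &a, int idx)
     *         {
     *             return (b != a) || (idx == 0);
     *         }
     *     };
     */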
/// Templated unrolling of item comparison (inductive case)
template <int ITERATION, int MAX_ITERATIONS>
struct Iterate
{
// Head flags
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
static __device__ __forceinline__ void FlagHeads(
int linear_tid,
FlagT (&flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
preds[ITERATION] = input[ITERATION - 1];
flags[ITERATION] = ApplyOp<FlagOp>::FlagT(
flag_op,
preds[ITERATION],
input[ITERATION],
(linear_tid * ITEMS_PER_THREAD) + ITERATION);
Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagHeads(linear_tid, flags, input, preds, flag_op);
}
// Tail flags
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
static __device__ __forceinline__ void FlagTails(
int linear_tid,
FlagT (&flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
flags[ITERATION] = ApplyOp<FlagOp>::FlagT(
flag_op,
input[ITERATION],
input[ITERATION + 1],
(linear_tid * ITEMS_PER_THREAD) + ITERATION + 1);
Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagTails(linear_tid, flags, input, flag_op);
}
};
/// Templated unrolling of item comparison (termination case)
template <int MAX_ITERATIONS>
struct Iterate<MAX_ITERATIONS, MAX_ITERATIONS>
{
// Head flags
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
static __device__ __forceinline__ void FlagHeads(
int /*linear_tid*/,
FlagT (&/*flags*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T (&/*input*/)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
T (&/*preds*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items
FlagOp /*flag_op*/) ///< [in] Binary boolean flag predicate
{}
// Tail flags
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
static __device__ __forceinline__ void FlagTails(
int /*linear_tid*/,
FlagT (&/*flags*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags
T (&/*input*/)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp /*flag_op*/) ///< [in] Binary boolean flag predicate
{}
};
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
public:
/// \smemstorage{BlockDiscontinuity}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockAdjacentDifference()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockAdjacentDifference(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Head flag operations
*********************************************************************/
//@{
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagHeads(
FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
// Share last item
temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
CTA_SYNC();
if (linear_tid == 0)
{
// Set flag for first thread-item (preds[0] is undefined)
head_flags[0] = 1;
}
else
{
preds[0] = temp_storage.last_items[linear_tid - 1];
head_flags[0] = ApplyOp<FlagOp>::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD);
}
// Set head_flags for remaining items
Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
}
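// Worked example: with 4 threads, 2 items per thread, and an inequality flag
// op, blocked input { [0,0], [0,1], [1,1], [1,2] } yields
// head_flags { [1,0], [0,1], [0,0], [0,1] } -- thread 0's first item is always
// flagged because it has no predecessor in this overload.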
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagHeads(
FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items
FlagOp flag_op, ///< [in] Binary boolean flag predicate
T tile_predecessor_item) ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>).
{
// Share last item
temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
CTA_SYNC();
// Set flag for first thread-item
preds[0] = (linear_tid == 0) ?
tile_predecessor_item : // First thread
temp_storage.last_items[linear_tid - 1];
head_flags[0] = ApplyOp<FlagOp>::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD);
// Set head_flags for remaining items
Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagHeads(
FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
T preds[ITEMS_PER_THREAD];
FlagHeads(head_flags, input, preds, flag_op);
}
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagHeads(
FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op, ///< [in] Binary boolean flag predicate
T tile_predecessor_item) ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>).
{
T preds[ITEMS_PER_THREAD];
FlagHeads(head_flags, input, preds, flag_op, tile_predecessor_item);
}
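// Usage sketch (identifiers are hypothetical; assumes cub::Inequality from
// thread_operators.cuh is available in this copy of CUB). Flags the first item
// of each run of equal keys in a blocked arrangement across a 128-thread block:
//
//     typedef cub::BlockAdjacentDifference<int, 128> BlockAdjacentDifferenceT;
//     __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage;
//
//     int thread_keys[4];   // blocked arrangement, loaded elsewhere
//     int head_flags[4];
//     BlockAdjacentDifferenceT(temp_storage).FlagHeads(
//         head_flags, thread_keys, cub::Inequality());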
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagTails(
FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
// Share first item
temp_storage.first_items[linear_tid] = input[0];
CTA_SYNC();
// Set flag for last thread-item
tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ?
1 : // Last thread
ApplyOp<FlagOp>::FlagT(
flag_op,
input[ITEMS_PER_THREAD - 1],
temp_storage.first_items[linear_tid + 1],
(linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
// Set tail_flags for remaining items
Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
}
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagTails(
FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op, ///< [in] Binary boolean flag predicate
T tile_successor_item) ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>).
{
// Share first item
temp_storage.first_items[linear_tid] = input[0];
CTA_SYNC();
// Set flag for last thread-item
T successor_item = (linear_tid == BLOCK_THREADS - 1) ?
tile_successor_item : // Last thread
temp_storage.first_items[linear_tid + 1];
tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT(
flag_op,
input[ITEMS_PER_THREAD - 1],
successor_item,
(linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
// Set tail_flags for remaining items
Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
}
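// Note: the tile_successor_item overload above is intended for multi-tile
// kernels, where thread BLOCK_THREADS-1 supplies the first item of the next
// tile so that this tile's last item is compared against it rather than being
// unconditionally flagged as a tail.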
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagHeadsAndTails(
FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
// Share first and last items
temp_storage.first_items[linear_tid] = input[0];
temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
CTA_SYNC();
T preds[ITEMS_PER_THREAD];
// Set flag for first thread-item (preds[0] is undefined for thread 0)
if (linear_tid == 0)
{
    head_flags[0] = 1;
}
else
{
    preds[0] = temp_storage.last_items[linear_tid - 1];
    head_flags[0] = ApplyOp<FlagOp>::FlagT(
        flag_op,
        preds[0],
        input[0],
        linear_tid * ITEMS_PER_THREAD);
}
// Set flag for last thread-item
tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ?
1 : // Last thread
ApplyOp<FlagOp>::FlagT(
flag_op,
input[ITEMS_PER_THREAD - 1],
temp_storage.first_items[linear_tid + 1],
(linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
// Set head_flags for remaining items
Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
// Set tail_flags for remaining items
Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
}
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagHeadsAndTails(
FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags
T tile_successor_item, ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>).
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
// Share first and last items
temp_storage.first_items[linear_tid] = input[0];
temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
CTA_SYNC();
T preds[ITEMS_PER_THREAD];
// Set flag for first thread-item
if (linear_tid == 0)
{
head_flags[0] = 1;
}
else
{
preds[0] = temp_storage.last_items[linear_tid - 1];
head_flags[0] = ApplyOp<FlagOp>::FlagT(
flag_op,
preds[0],
input[0],
linear_tid * ITEMS_PER_THREAD);
}
// Set flag for last thread-item
T successor_item = (linear_tid == BLOCK_THREADS - 1) ?
tile_successor_item : // Last thread
temp_storage.first_items[linear_tid + 1];
tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT(
flag_op,
input[ITEMS_PER_THREAD - 1],
successor_item,
(linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
// Set head_flags for remaining items
Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
// Set tail_flags for remaining items
Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
}
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagHeadsAndTails(
FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T tile_predecessor_item, ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>).
FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
// Share first and last items
temp_storage.first_items[linear_tid] = input[0];
temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
CTA_SYNC();
T preds[ITEMS_PER_THREAD];
// Set flag for first thread-item
preds[0] = (linear_tid == 0) ?
tile_predecessor_item : // First thread
temp_storage.last_items[linear_tid - 1];
head_flags[0] = ApplyOp<FlagOp>::FlagT(
flag_op,
preds[0],
input[0],
linear_tid * ITEMS_PER_THREAD);
// Set flag for last thread-item
tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ?
1 : // Last thread
ApplyOp<FlagOp>::FlagT(
flag_op,
input[ITEMS_PER_THREAD - 1],
temp_storage.first_items[linear_tid + 1],
(linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
// Set head_flags for remaining items
Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
// Set tail_flags for remaining items
Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
}
template <
int ITEMS_PER_THREAD,
typename FlagT,
typename FlagOp>
__device__ __forceinline__ void FlagHeadsAndTails(
FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags
T tile_predecessor_item, ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>).
FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags
T tile_successor_item, ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>).
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
FlagOp flag_op) ///< [in] Binary boolean flag predicate
{
// Share first and last items
temp_storage.first_items[linear_tid] = input[0];
temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
CTA_SYNC();
T preds[ITEMS_PER_THREAD];
// Set flag for first thread-item
preds[0] = (linear_tid == 0) ?
tile_predecessor_item : // First thread
temp_storage.last_items[linear_tid - 1];
head_flags[0] = ApplyOp<FlagOp>::FlagT(
flag_op,
preds[0],
input[0],
linear_tid * ITEMS_PER_THREAD);
// Set flag for last thread-item
T successor_item = (linear_tid == BLOCK_THREADS - 1) ?
tile_successor_item : // Last thread
temp_storage.first_items[linear_tid + 1];
tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT(
flag_op,
input[ITEMS_PER_THREAD - 1],
successor_item,
(linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
// Set head_flags for remaining items
Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
// Set tail_flags for remaining items
Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_radix_sort.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* The cub::BlockRadixSort class provides [<em>collective</em>](index.html#sec0) methods for radix sorting of items partitioned across a CUDA thread block.
*/
#pragma once
#include "block_exchange.cuh"
#include "block_radix_rank.cuh"
#include "../util_ptx.cuh"
#include "../util_arch.cuh"
#include "../util_type.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \brief The BlockRadixSort class provides [<em>collective</em>](index.html#sec0) methods for sorting items partitioned across a CUDA thread block using a radix sorting method. ![](sorting_logo.png)
* \ingroup BlockModule
*
* \tparam KeyT KeyT type
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam ITEMS_PER_THREAD The number of items per thread
* \tparam ValueT <b>[optional]</b> ValueT type (default: cub::NullType, which indicates a keys-only sort)
* \tparam RADIX_BITS <b>[optional]</b> The number of radix bits per digit place (default: 4 bits)
* \tparam MEMOIZE_OUTER_SCAN <b>[optional]</b> Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure (default: true for architectures SM35 and newer, false otherwise).
* \tparam INNER_SCAN_ALGORITHM <b>[optional]</b> The cub::BlockScanAlgorithm algorithm to use (default: cub::BLOCK_SCAN_WARP_SCANS)
* \tparam SMEM_CONFIG <b>[optional]</b> Shared memory bank mode (default: \p cudaSharedMemBankSizeFourByte)
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
* - The [<em>radix sorting method</em>](http://en.wikipedia.org/wiki/Radix_sort) arranges
* items into ascending order. It relies upon a positional representation for
* keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits,
* characters, etc.) specified from least-significant to most-significant. For a
* given input sequence of keys and a set of rules specifying a total ordering
* of the symbolic alphabet, the radix sorting method produces a lexicographic
* ordering of those keys.
* - BlockRadixSort can sort all of the built-in C++ numeric primitive types, e.g.:
* <tt>unsigned char</tt>, \p int, \p double, etc. Within each key, the implementation treats fixed-length
* bit-sequences of \p RADIX_BITS as radix digit places. Although the direct radix sorting
* method can only be applied to unsigned integral types, BlockRadixSort
* is able to sort signed and floating-point types via simple bit-wise transformations
* that ensure lexicographic key ordering.
* - \rowmajor
*
* \par Performance Considerations
* - \granularity
*
* \par A Simple Example
* \blockcollective{BlockRadixSort}
* \par
* The code snippet below illustrates a sort of 512 integer keys that
* are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* ...
*
* // Collectively sort the keys
* BlockRadixSort(temp_storage).Sort(thread_keys);
*
* ...
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The
* corresponding output \p thread_keys in those threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
*
*/
template <
typename KeyT,
int BLOCK_DIM_X,
int ITEMS_PER_THREAD,
typename ValueT = NullType,
int RADIX_BITS = 4,
bool MEMOIZE_OUTER_SCAN = (CUB_PTX_ARCH >= 350) ? true : false,
BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS,
cudaSharedMemConfig SMEM_CONFIG = cudaSharedMemBankSizeFourByte,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockRadixSort
{
private:
/******************************************************************************
* Constants and type definitions
******************************************************************************/
enum
{
// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
// Whether or not there are values to be trucked along with keys
KEYS_ONLY = Equals<ValueT, NullType>::VALUE,
};
// KeyT traits and unsigned bits type
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
/// Ascending BlockRadixRank utility type
typedef BlockRadixRank<
BLOCK_DIM_X,
RADIX_BITS,
false,
MEMOIZE_OUTER_SCAN,
INNER_SCAN_ALGORITHM,
SMEM_CONFIG,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
PTX_ARCH>
AscendingBlockRadixRank;
/// Descending BlockRadixRank utility type
typedef BlockRadixRank<
BLOCK_DIM_X,
RADIX_BITS,
true,
MEMOIZE_OUTER_SCAN,
INNER_SCAN_ALGORITHM,
SMEM_CONFIG,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
PTX_ARCH>
DescendingBlockRadixRank;
/// BlockExchange utility type for keys
typedef BlockExchange<KeyT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchangeKeys;
/// BlockExchange utility type for values
typedef BlockExchange<ValueT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchangeValues;
/// Shared memory storage layout type
union _TempStorage
{
typename AscendingBlockRadixRank::TempStorage ascending_ranking_storage;
typename DescendingBlockRadixRank::TempStorage descending_ranking_storage;
typename BlockExchangeKeys::TempStorage exchange_keys;
typename BlockExchangeValues::TempStorage exchange_values;
};
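// Note: the union lets the ranking and exchange phases alias the same shared
// memory; this is safe because every radix pass separates its RankKeys and
// ScatterTo* steps with a CTA_SYNC() barrier before the storage is reused.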
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
/******************************************************************************
* Utility methods
******************************************************************************/
/// Internal storage allocator
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/// Rank keys (specialized for ascending sort)
__device__ __forceinline__ void RankKeys(
UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD],
int (&ranks)[ITEMS_PER_THREAD],
int begin_bit,
int pass_bits,
Int2Type<false> /*is_descending*/)
{
AscendingBlockRadixRank(temp_storage.ascending_ranking_storage).RankKeys(
unsigned_keys,
ranks,
begin_bit,
pass_bits);
}
/// Rank keys (specialized for descending sort)
__device__ __forceinline__ void RankKeys(
UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD],
int (&ranks)[ITEMS_PER_THREAD],
int begin_bit,
int pass_bits,
Int2Type<true> /*is_descending*/)
{
DescendingBlockRadixRank(temp_storage.descending_ranking_storage).RankKeys(
unsigned_keys,
ranks,
begin_bit,
pass_bits);
}
/// ExchangeValues (specialized for key-value sort, to-blocked arrangement)
__device__ __forceinline__ void ExchangeValues(
ValueT (&values)[ITEMS_PER_THREAD],
int (&ranks)[ITEMS_PER_THREAD],
Int2Type<false> /*is_keys_only*/,
Int2Type<true> /*is_blocked*/)
{
CTA_SYNC();
// Exchange values through shared memory in blocked arrangement
BlockExchangeValues(temp_storage.exchange_values).ScatterToBlocked(values, ranks);
}
/// ExchangeValues (specialized for key-value sort, to-striped arrangement)
__device__ __forceinline__ void ExchangeValues(
ValueT (&values)[ITEMS_PER_THREAD],
int (&ranks)[ITEMS_PER_THREAD],
Int2Type<false> /*is_keys_only*/,
Int2Type<false> /*is_blocked*/)
{
CTA_SYNC();
// Exchange values through shared memory in striped arrangement
BlockExchangeValues(temp_storage.exchange_values).ScatterToStriped(values, ranks);
}
/// ExchangeValues (specialized for keys-only sort)
template <int IS_BLOCKED>
__device__ __forceinline__ void ExchangeValues(
ValueT (&/*values*/)[ITEMS_PER_THREAD],
int (&/*ranks*/)[ITEMS_PER_THREAD],
Int2Type<true> /*is_keys_only*/,
Int2Type<IS_BLOCKED> /*is_blocked*/)
{}
/// Sort blocked arrangement
template <int DESCENDING, int KEYS_ONLY>
__device__ __forceinline__ void SortBlocked(
KeyT (&keys)[ITEMS_PER_THREAD], ///< Keys to sort
ValueT (&values)[ITEMS_PER_THREAD], ///< Values to sort
int begin_bit, ///< The beginning (least-significant) bit index needed for key comparison
int end_bit, ///< The past-the-end (most-significant) bit index needed for key comparison
Int2Type<DESCENDING> is_descending, ///< Tag whether is a descending-order sort
Int2Type<KEYS_ONLY> is_keys_only) ///< Tag whether is keys-only sort
{
UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD] =
reinterpret_cast<UnsignedBits (&)[ITEMS_PER_THREAD]>(keys);
// Twiddle bits if necessary
#pragma unroll
for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
{
unsigned_keys[KEY] = KeyTraits::TwiddleIn(unsigned_keys[KEY]);
}
// Radix sorting passes
while (true)
{
int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit);
// Rank the blocked keys
int ranks[ITEMS_PER_THREAD];
RankKeys(unsigned_keys, ranks, begin_bit, pass_bits, is_descending);
begin_bit += RADIX_BITS;
CTA_SYNC();
// Exchange keys through shared memory in blocked arrangement
BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks);
// Exchange values through shared memory in blocked arrangement
ExchangeValues(values, ranks, is_keys_only, Int2Type<true>());
// Quit if done
if (begin_bit >= end_bit) break;
CTA_SYNC();
}
// Untwiddle bits if necessary
#pragma unroll
for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
{
unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]);
}
}
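// Worked example: for 32-bit keys with the default RADIX_BITS = 4 and the full
// bit range [0, 32), the loop above makes ceil(32 / 4) = 8 passes with
// pass_bits = 4 each. For a partial range such as begin_bit = 0, end_bit = 30,
// the final pass (begin_bit = 28) ranks only pass_bits = CUB_MIN(4, 30 - 28) = 2
// bits.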
public:
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/// Sort blocked -> striped arrangement
template <int DESCENDING, int KEYS_ONLY>
__device__ __forceinline__ void SortBlockedToStriped(
KeyT (&keys)[ITEMS_PER_THREAD], ///< Keys to sort
ValueT (&values)[ITEMS_PER_THREAD], ///< Values to sort
int begin_bit, ///< The beginning (least-significant) bit index needed for key comparison
int end_bit, ///< The past-the-end (most-significant) bit index needed for key comparison
Int2Type<DESCENDING> is_descending, ///< Tag whether is a descending-order sort
Int2Type<KEYS_ONLY> is_keys_only) ///< Tag whether is keys-only sort
{
UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD] =
reinterpret_cast<UnsignedBits (&)[ITEMS_PER_THREAD]>(keys);
// Twiddle bits if necessary
#pragma unroll
for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
{
unsigned_keys[KEY] = KeyTraits::TwiddleIn(unsigned_keys[KEY]);
}
// Radix sorting passes
while (true)
{
int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit);
// Rank the blocked keys
int ranks[ITEMS_PER_THREAD];
RankKeys(unsigned_keys, ranks, begin_bit, pass_bits, is_descending);
begin_bit += RADIX_BITS;
CTA_SYNC();
// Check if this is the last pass
if (begin_bit >= end_bit)
{
// Last pass exchanges keys through shared memory in striped arrangement
BlockExchangeKeys(temp_storage.exchange_keys).ScatterToStriped(keys, ranks);
// Last pass exchanges values through shared memory in striped arrangement
ExchangeValues(values, ranks, is_keys_only, Int2Type<false>());
// Quit
break;
}
// Exchange keys through shared memory in blocked arrangement
BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks);
// Exchange values through shared memory in blocked arrangement
ExchangeValues(values, ranks, is_keys_only, Int2Type<true>());
CTA_SYNC();
}
// Untwiddle bits if necessary
#pragma unroll
for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
{
unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]);
}
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
/// \smemstorage{BlockRadixSort}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockRadixSort()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockRadixSort(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Sorting (blocked arrangements)
*********************************************************************/
//@{
/**
* \brief Performs an ascending block-wide radix sort over a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys.
*
* \par
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sort of 512 integer keys that
* are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive keys.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each
* typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* ...
*
* // Collectively sort the keys
* BlockRadixSort(temp_storage).Sort(thread_keys);
*
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>.
* The corresponding output \p thread_keys in those threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
*/
__device__ __forceinline__ void Sort(
KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison
int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison
{
NullType values[ITEMS_PER_THREAD];
SortBlocked(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>());
}
/**
* \brief Performs an ascending block-wide radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys and values.
*
* \par
* - BlockRadixSort can only accommodate one associated tile of values. To "truck along"
* more than one tile of values, simply perform a key-value sort of the keys paired
* with a temporary value array that enumerates the key indices. The reordered indices
* can then be used as a gather-vector for exchanging other associated tile data through
* shared memory.
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sort of 512 integer keys and values that
* are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive pairs.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each
* typedef cub::BlockRadixSort<int, 128, 4, int> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* int thread_values[4];
* ...
*
* // Collectively sort the keys and values among block threads
* BlockRadixSort(temp_storage).Sort(thread_keys, thread_values);
*
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The
* corresponding output \p thread_keys in those threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
*
*/
__device__ __forceinline__ void Sort(
KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort
int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison
int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison
{
SortBlocked(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>());
}
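// Sketch of the "truck along" pattern described above (hypothetical
// identifiers, assuming the cub::BlockRadixSort<int, 128, 4, int>
// specialization from the snippet): sort keys paired with their original
// indices, then use the reordered indices as a gather vector for any number of
// associated arrays.
//
//     int thread_keys[4];
//     int thread_idx[4];
//     #pragma unroll
//     for (int ITEM = 0; ITEM < 4; ++ITEM)
//         thread_idx[ITEM] = (threadIdx.x * 4) + ITEM;
//     BlockRadixSort(temp_storage).Sort(thread_keys, thread_idx);
//     // thread_idx now maps each sorted position back to its original position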
/**
* \brief Performs a descending block-wide radix sort over a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys.
*
* \par
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sort of 512 integer keys that
* are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive keys.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each
* typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* ...
*
* // Collectively sort the keys
* BlockRadixSort(temp_storage).Sort(thread_keys);
*
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>.
* The corresponding output \p thread_keys in those threads will be
* <tt>{ [511,510,509,508], [507,506,505,504], [503,502,501,500], ..., [3,2,1,0] }</tt>.
*/
__device__ __forceinline__ void SortDescending(
KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison
int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison
{
NullType values[ITEMS_PER_THREAD];
SortBlocked(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>());
}
/**
* \brief Performs a descending block-wide radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys and values.
*
* \par
* - BlockRadixSort can only accommodate one associated tile of values. To "truck along"
* more than one tile of values, simply perform a key-value sort of the keys paired
* with a temporary value array that enumerates the key indices. The reordered indices
* can then be used as a gather-vector for exchanging other associated tile data through
* shared memory.
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sort of 512 integer keys and values that
* are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive pairs.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each
* typedef cub::BlockRadixSort<int, 128, 4, int> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* int thread_values[4];
* ...
*
* // Collectively sort the keys and values among block threads
* BlockRadixSort(temp_storage).Sort(thread_keys, thread_values);
*
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The
* corresponding output \p thread_keys in those threads will be
* <tt>{ [511,510,509,508], [507,506,505,504], [503,502,501,500], ..., [3,2,1,0] }</tt>.
*
*/
__device__ __forceinline__ void SortDescending(
KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort
int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison
int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison
{
SortBlocked(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>());
}
//@} end member group
/******************************************************************//**
* \name Sorting (blocked arrangement -> striped arrangement)
*********************************************************************/
//@{
/**
* \brief Performs an ascending radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys, leaving them in a [<em>striped arrangement</em>](index.html#sec5sec3).
*
* \par
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sort of 512 integer keys that
* are initially partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive keys. The final partitioning is striped.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each
* typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* ...
*
* // Collectively sort the keys
* BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys);
*
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The
* corresponding output \p thread_keys in those threads will be
* <tt>{ [0,128,256,384], [1,129,257,385], [2,130,258,386], ..., [127,255,383,511] }</tt>.
*
*/
__device__ __forceinline__ void SortBlockedToStriped(
KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison
int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison
{
NullType values[ITEMS_PER_THREAD];
SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>());
}
/**
* \brief Performs an ascending radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys and values, leaving them in a [<em>striped arrangement</em>](index.html#sec5sec3).
*
* \par
* - BlockRadixSort can only accommodate one associated tile of values. To "truck along"
* more than one tile of values, simply perform a key-value sort of the keys paired
* with a temporary value array that enumerates the key indices. The reordered indices
* can then be used as a gather-vector for exchanging other associated tile data through
* shared memory.
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sort of 512 integer keys and values that
* are initially partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive pairs. The final partitioning is striped.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each
* typedef cub::BlockRadixSort<int, 128, 4, int> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* int thread_values[4];
* ...
*
* // Collectively sort the keys and values among block threads
* BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys, thread_values);
*
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The
* corresponding output \p thread_keys in those threads will be
* <tt>{ [0,128,256,384], [1,129,257,385], [2,130,258,386], ..., [127,255,383,511] }</tt>.
*
*/
__device__ __forceinline__ void SortBlockedToStriped(
KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort
int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison
int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison
{
SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>());
}
/**
* \brief Performs a descending radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys, leaving them in a [<em>striped arrangement</em>](index.html#sec5sec3).
*
* \par
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sort of 512 integer keys that
* are initially partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive keys. The final partitioning is striped.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each
* typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* ...
*
* // Collectively sort the keys
* BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys);
*
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The
* corresponding output \p thread_keys in those threads will be
* <tt>{ [511,383,255,127], [510,382,254,126], [509,381,253,125], ..., [384,256,128,0] }</tt>.
*
*/
__device__ __forceinline__ void SortDescendingBlockedToStriped(
KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison
int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison
{
NullType values[ITEMS_PER_THREAD];
SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>());
}
/**
* \brief Performs a descending radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys and values, leaving them in a [<em>striped arrangement</em>](index.html#sec5sec3).
*
* \par
* - BlockRadixSort can only accommodate one associated tile of values. To "truck along"
* more than one tile of values, simply perform a key-value sort of the keys paired
* with a temporary value array that enumerates the key indices. The reordered indices
* can then be used as a gather-vector for exchanging other associated tile data through
* shared memory.
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a sort of 512 integer keys and values that
* are initially partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
* where each thread owns 4 consecutive pairs. The final partitioning is striped.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each
* typedef cub::BlockRadixSort<int, 128, 4, int> BlockRadixSort;
*
* // Allocate shared memory for BlockRadixSort
* __shared__ typename BlockRadixSort::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* int thread_values[4];
* ...
*
* // Collectively sort the keys and values among block threads
* BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys, thread_values);
*
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The
* corresponding output \p thread_keys in those threads will be
* <tt>{ [511,383,255,127], [510,382,254,126], [509,381,253,125], ..., [384,256,128,0] }</tt>.
*
*/
__device__ __forceinline__ void SortDescendingBlockedToStriped(
KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort
int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison
int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison
{
SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>());
}
//@} end member group
};
/**
* \example example_block_radix_sort.cu
*/
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
| 0 |
rapidsai_public_repos/nvgraph/external/cub_semiring | rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_exchange.cuh | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/**
* \file
* The cub::BlockExchange class provides [<em>collective</em>](index.html#sec0) methods for rearranging data partitioned across a CUDA thread block.
*/
#pragma once
#include "../util_ptx.cuh"
#include "../util_arch.cuh"
#include "../util_macro.cuh"
#include "../util_type.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \brief The BlockExchange class provides [<em>collective</em>](index.html#sec0) methods for rearranging data partitioned across a CUDA thread block. ![](transpose_logo.png)
* \ingroup BlockModule
*
* \tparam T The data type to be exchanged.
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam ITEMS_PER_THREAD The number of items partitioned onto each thread.
* \tparam WARP_TIME_SLICING <b>[optional]</b> When \p true, only use enough shared memory for a single warp's worth of tile data, time-slicing the block-wide exchange over multiple synchronized rounds. Yields a smaller memory footprint at the expense of decreased parallelism. (Default: false)
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
* - It is commonplace for blocks of threads to rearrange data items between
* threads. For example, the device-accessible memory subsystem prefers access patterns
* where data items are "striped" across threads (where consecutive threads access consecutive items),
* yet most block-wide operations prefer a "blocked" partitioning of items across threads
* (where consecutive items belong to a single thread).
* - BlockExchange supports the following types of data exchanges:
* - Transposing between [<em>blocked</em>](index.html#sec5sec3) and [<em>striped</em>](index.html#sec5sec3) arrangements
* - Transposing between [<em>blocked</em>](index.html#sec5sec3) and [<em>warp-striped</em>](index.html#sec5sec3) arrangements
* - Scattering ranked items to a [<em>blocked arrangement</em>](index.html#sec5sec3)
* - Scattering ranked items to a [<em>striped arrangement</em>](index.html#sec5sec3)
* - \rowmajor
*
* \par A Simple Example
* \blockcollective{BlockExchange}
* \par
* The code snippet below illustrates the conversion from a "blocked" to a "striped" arrangement
* of 512 integer items partitioned across 128 threads where each thread owns 4 items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
*
* // Allocate shared memory for BlockExchange
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* // Load a tile of data striped across threads
* int thread_data[4];
* cub::LoadDirectStriped<128>(threadIdx.x, d_data, thread_data);
*
* // Collectively exchange data into a blocked arrangement across threads
* BlockExchange(temp_storage).StripedToBlocked(thread_data);
*
* \endcode
* \par
* Suppose the set of striped input \p thread_data across the block of threads is
* <tt>{ [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] }</tt>.
* The corresponding output \p thread_data in those threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
*
* \par Performance Considerations
* - Proper device-specific padding ensures zero bank conflicts for most types.
*
*/
template <
typename InputT,
int BLOCK_DIM_X,
int ITEMS_PER_THREAD,
bool WARP_TIME_SLICING = false,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockExchange
{
private:
/******************************************************************************
* Constants
******************************************************************************/
/// Constants
enum
{
/// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH),
WARP_THREADS = 1 << LOG_WARP_THREADS,
WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,
LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(PTX_ARCH),
SMEM_BANKS = 1 << LOG_SMEM_BANKS,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
TIME_SLICES = (WARP_TIME_SLICING) ? WARPS : 1,
TIME_SLICED_THREADS = (WARP_TIME_SLICING) ? CUB_MIN(BLOCK_THREADS, WARP_THREADS) : BLOCK_THREADS,
TIME_SLICED_ITEMS = TIME_SLICED_THREADS * ITEMS_PER_THREAD,
WARP_TIME_SLICED_THREADS = CUB_MIN(BLOCK_THREADS, WARP_THREADS),
WARP_TIME_SLICED_ITEMS = WARP_TIME_SLICED_THREADS * ITEMS_PER_THREAD,
// Insert padding to avoid bank conflicts during raking when items per thread is a power of two and > 4 (otherwise we can typically use 128b loads)
INSERT_PADDING = (ITEMS_PER_THREAD > 4) && (PowerOfTwo<ITEMS_PER_THREAD>::VALUE),
PADDING_ITEMS = (INSERT_PADDING) ? (TIME_SLICED_ITEMS >> LOG_SMEM_BANKS) : 0,
};
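// Worked example: for a 128-thread (4-warp) block with ITEMS_PER_THREAD = 8 on
// an architecture with 32 shared memory banks (LOG_SMEM_BANKS = 5):
// INSERT_PADDING is true (8 is a power of two greater than 4),
// TIME_SLICED_ITEMS = 1024 without time-slicing, and
// PADDING_ITEMS = 1024 >> 5 = 32, i.e. one padding slot per 32 items to
// stagger otherwise-conflicting bank accesses.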
/******************************************************************************
* Type definitions
******************************************************************************/
/// Shared memory storage layout type
struct __align__(16) _TempStorage
{
InputT buff[TIME_SLICED_ITEMS + PADDING_ITEMS];
};
public:
/// \smemstorage{BlockExchange}
struct TempStorage : Uninitialized<_TempStorage> {};
private:
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
unsigned int lane_id;
unsigned int warp_id;
unsigned int warp_offset;
/******************************************************************************
* Utility methods
******************************************************************************/
/// Internal storage allocator
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/**
* Transposes data items from <em>blocked</em> arrangement to <em>striped</em> arrangement. Specialized for no timeslicing.
*/
template <typename OutputT>
__device__ __forceinline__ void BlockedToStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
Int2Type<false> /*time_slicing*/)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
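// Worked example: with BLOCK_THREADS = 4, ITEMS_PER_THREAD = 2, and padding
// disabled, thread t writes its blocked items to offsets {2t, 2t+1} and reads
// back striped offsets {t, t+4}; e.g. thread 1 writes buff[2], buff[3] and
// reads buff[1], buff[5].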
/**
* Transposes data items from <em>blocked</em> arrangement to <em>striped</em> arrangement. Specialized for warp-timeslicing.
*/
template <typename OutputT>
__device__ __forceinline__ void BlockedToStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
Int2Type<true> /*time_slicing*/)
{
InputT temp_items[ITEMS_PER_THREAD];
#pragma unroll
for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++)
{
const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS;
const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS;
CTA_SYNC();
if (warp_id == SLICE)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
// Read a strip of items
const int STRIP_OFFSET = ITEM * BLOCK_THREADS;
const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS;
if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET))
{
int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET;
if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS))
{
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_items[ITEM] = temp_storage.buff[item_offset];
}
}
}
}
// Copy
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
output_items[ITEM] = temp_items[ITEM];
}
}
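// Note: the (SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET) test above
// is a half-open interval-overlap check between the warp slice
// [SLICE_OFFSET, SLICE_OOB) and the strip [STRIP_OFFSET, STRIP_OOB); a thread
// gathers an item from the current slice only when its striped offset falls
// inside that slice.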
/**
* Transposes data items from <em>blocked</em> arrangement to <em>warp-striped</em> arrangement. Specialized for no timeslicing
*/
template <typename OutputT>
__device__ __forceinline__ void BlockedToWarpStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
Int2Type<false> /*time_slicing*/)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = warp_offset + ITEM + (lane_id * ITEMS_PER_THREAD);
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
WARP_SYNC(0xffffffff);
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = warp_offset + (ITEM * WARP_TIME_SLICED_THREADS) + lane_id;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
/**
* Transposes data items from <em>blocked</em> arrangement to <em>warp-striped</em> arrangement. Specialized for warp-timeslicing
*/
template <typename OutputT>
__device__ __forceinline__ void BlockedToWarpStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
Int2Type<true> /*time_slicing*/)
{
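// Warp 0 exchanges through the warp-sized buffer immediately; the remaining warps take turns below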
if (warp_id == 0)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD);
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
WARP_SYNC(0xffffffff);
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
#pragma unroll
for (unsigned int SLICE = 1; SLICE < TIME_SLICES; ++SLICE)
{
CTA_SYNC();
if (warp_id == SLICE)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD);
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
WARP_SYNC(0xffffffff);
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
}
}
/**
* Transposes data items from <em>striped</em> arrangement to <em>blocked</em> arrangement. Specialized for no timeslicing.
*/
template <typename OutputT>
__device__ __forceinline__ void StripedToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
Int2Type<false> /*time_slicing*/)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
CTA_SYNC();
// No timeslicing
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
/**
* Transposes data items from <em>striped</em> arrangement to <em>blocked</em> arrangement. Specialized for warp-timeslicing.
*/
template <typename OutputT>
__device__ __forceinline__ void StripedToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
Int2Type<true> /*time_slicing*/)
{
// Warp time-slicing
InputT temp_items[ITEMS_PER_THREAD];
#pragma unroll
for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++)
{
const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS;
const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS;
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
// Write a strip of items
const int STRIP_OFFSET = ITEM * BLOCK_THREADS;
const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS;
if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET))
{
int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET;
if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS))
{
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
}
}
CTA_SYNC();
if (warp_id == SLICE)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_items[ITEM] = temp_storage.buff[item_offset];
}
}
}
// Copy
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
output_items[ITEM] = temp_items[ITEM];
}
}
/**
* Transposes data items from <em>warp-striped</em> arrangement to <em>blocked</em> arrangement. Specialized for no timeslicing
*/
template <typename OutputT>
__device__ __forceinline__ void WarpStripedToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
Int2Type<false> /*time_slicing*/)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = warp_offset + (ITEM * WARP_TIME_SLICED_THREADS) + lane_id;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
WARP_SYNC(0xffffffff);
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = warp_offset + ITEM + (lane_id * ITEMS_PER_THREAD);
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
/**
* Transposes data items from <em>warp-striped</em> arrangement to <em>blocked</em> arrangement. Specialized for warp-timeslicing
*/
template <typename OutputT>
__device__ __forceinline__ void WarpStripedToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
Int2Type<true> /*time_slicing*/)
{
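// Each warp takes a turn exchanging through the single warp-sized buffer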
#pragma unroll
for (unsigned int SLICE = 0; SLICE < TIME_SLICES; ++SLICE)
{
CTA_SYNC();
if (warp_id == SLICE)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id;
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_storage.buff[item_offset] = input_items[ITEM];
}
WARP_SYNC(0xffffffff);
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD);
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
}
}
/**
* Exchanges data items annotated by rank into <em>blocked</em> arrangement. Specialized for no timeslicing.
*/
template <typename OutputT, typename OffsetT>
__device__ __forceinline__ void ScatterToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks
Int2Type<false> /*time_slicing*/)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ranks[ITEM];
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
temp_storage.buff[item_offset] = input_items[ITEM];
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM;
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
/**
* Exchanges data items annotated by rank into <em>blocked</em> arrangement. Specialized for warp-timeslicing.
*/
template <typename OutputT, typename OffsetT>
__device__ __forceinline__ void ScatterToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks
Int2Type<true> /*time_slicing*/)
{
InputT temp_items[ITEMS_PER_THREAD];
#pragma unroll
for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++)
{
CTA_SYNC();
const int SLICE_OFFSET = TIME_SLICED_ITEMS * SLICE;
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ranks[ITEM] - SLICE_OFFSET;
if ((item_offset >= 0) && (item_offset < WARP_TIME_SLICED_ITEMS))
{
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
temp_storage.buff[item_offset] = input_items[ITEM];
}
}
CTA_SYNC();
if (warp_id == SLICE)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM;
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
temp_items[ITEM] = temp_storage.buff[item_offset];
}
}
}
// Copy
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
output_items[ITEM] = temp_items[ITEM];
}
}
/**
* Exchanges data items annotated by rank into <em>striped</em> arrangement. Specialized for no timeslicing.
*/
template <typename OutputT, typename OffsetT>
__device__ __forceinline__ void ScatterToStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks
Int2Type<false> /*time_slicing*/)
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ranks[ITEM];
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
temp_storage.buff[item_offset] = input_items[ITEM];
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid;
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
/**
* Exchanges data items annotated by rank into <em>striped</em> arrangement. Specialized for warp-timeslicing.
*/
template <typename OutputT, typename OffsetT>
__device__ __forceinline__ void ScatterToStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks
Int2Type<true> /*time_slicing*/)
{
InputT temp_items[ITEMS_PER_THREAD];
#pragma unroll
for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++)
{
const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS;
const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS;
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ranks[ITEM] - SLICE_OFFSET;
if ((item_offset >= 0) && (item_offset < WARP_TIME_SLICED_ITEMS))
{
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
temp_storage.buff[item_offset] = input_items[ITEM];
}
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
// Read a strip of items
const int STRIP_OFFSET = ITEM * BLOCK_THREADS;
const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS;
if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET))
{
int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET;
if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS))
{
if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS;
temp_items[ITEM] = temp_storage.buff[item_offset];
}
}
}
}
// Copy
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
output_items[ITEM] = temp_items[ITEM];
}
}
public:
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockExchange()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)),
warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS),
lane_id(LaneId()),
warp_offset(warp_id * WARP_TIME_SLICED_ITEMS)
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockExchange(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)),
warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS),
lane_id(LaneId()),
warp_offset(warp_id * WARP_TIME_SLICED_ITEMS)
{}
//@} end member group
/******************************************************************//**
* \name Structured exchanges
*********************************************************************/
//@{
/**
* \brief Transposes data items from <em>striped</em> arrangement to <em>blocked</em> arrangement.
*
* \par
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the conversion from a "striped" to a "blocked" arrangement
* of 512 integer items partitioned across 128 threads where each thread owns 4 items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
*
* // Allocate shared memory for BlockExchange
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* // Load a tile of ordered data into a striped arrangement across block threads
* int thread_data[4];
* cub::LoadDirectStriped<128>(threadIdx.x, d_data, thread_data);
*
* // Collectively exchange data into a blocked arrangement across threads
* BlockExchange(temp_storage).StripedToBlocked(thread_data, thread_data);
*
* \endcode
* \par
* Suppose the set of striped input \p thread_data across the block of threads is
* <tt>{ [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] }</tt> after loading from device-accessible memory.
* The corresponding output \p thread_data in those threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
*
*/
template <typename OutputT>
__device__ __forceinline__ void StripedToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
{
StripedToBlocked(input_items, output_items, Int2Type<WARP_TIME_SLICING>());
}
/**
* \brief Transposes data items from <em>blocked</em> arrangement to <em>striped</em> arrangement.
*
* \par
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the conversion from a "blocked" to a "striped" arrangement
* of 512 integer items partitioned across 128 threads where each thread owns 4 items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
*
* // Allocate shared memory for BlockExchange
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_data[4];
* ...
*
* // Collectively exchange data into a striped arrangement across threads
* BlockExchange(temp_storage).BlockedToStriped(thread_data, thread_data);
*
* // Store data striped across block threads into an ordered tile
* cub::StoreDirectStriped<128>(threadIdx.x, d_data, thread_data);
*
* \endcode
* \par
* Suppose the set of blocked input \p thread_data across the block of threads is
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
* The corresponding output \p thread_data in those threads will be
* <tt>{ [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] }</tt> in
* preparation for storing to device-accessible memory.
*
*/
template <typename OutputT>
__device__ __forceinline__ void BlockedToStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
{
BlockedToStriped(input_items, output_items, Int2Type<WARP_TIME_SLICING>());
}
/**
* \brief Transposes data items from <em>warp-striped</em> arrangement to <em>blocked</em> arrangement.
*
* \par
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the conversion from a "warp-striped" to a "blocked" arrangement
* of 512 integer items partitioned across 128 threads where each thread owns 4 items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
*
* // Allocate shared memory for BlockExchange
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* // Load a tile of ordered data into a warp-striped arrangement across warp threads
* int thread_data[4];
* cub::LoadDirectWarpStriped(threadIdx.x, d_data, thread_data);
*
* // Collectively exchange data into a blocked arrangement across threads
* BlockExchange(temp_storage).WarpStripedToBlocked(thread_data, thread_data);
*
* \endcode
* \par
* Suppose the set of warp-striped input \p thread_data across the block of threads is
* <tt>{ [0,32,64,96], [1,33,65,97], [2,34,66,98], ..., [415,447,479,511] }</tt>
* after loading from device-accessible memory. (The first 128 items are striped across
* the first warp of 32 threads, the second 128 items are striped across the second warp, etc.)
* The corresponding output \p thread_data in those threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
*
*/
template <typename OutputT>
__device__ __forceinline__ void WarpStripedToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
{
WarpStripedToBlocked(input_items, output_items, Int2Type<WARP_TIME_SLICING>());
}
/**
* \brief Transposes data items from <em>blocked</em> arrangement to <em>warp-striped</em> arrangement.
*
* \par
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the conversion from a "blocked" to a "warp-striped" arrangement
* of 512 integer items partitioned across 128 threads where each thread owns 4 items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
*
* // Allocate shared memory for BlockExchange
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_data[4];
* ...
*
* // Collectively exchange data into a warp-striped arrangement across threads
* BlockExchange(temp_storage).BlockedToWarpStriped(thread_data, thread_data);
*
* // Store data striped across warp threads into an ordered tile
* cub::StoreDirectWarpStriped(threadIdx.x, d_data, thread_data);
*
* \endcode
* \par
* Suppose the set of blocked input \p thread_data across the block of threads is
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
* The corresponding output \p thread_data in those threads will be
* <tt>{ [0,32,64,96], [1,33,65,97], [2,34,66,98], ..., [415,447,479,511] }</tt>
* in preparation for storing to device-accessible memory. (The first 128 items are striped across
* the first warp of 32 threads, the second 128 items are striped across the second warp, etc.)
*
*/
template <typename OutputT>
__device__ __forceinline__ void BlockedToWarpStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
{
BlockedToWarpStriped(input_items, output_items, Int2Type<WARP_TIME_SLICING>());
}
//@} end member group
/******************************************************************//**
* \name Scatter exchanges
*********************************************************************/
//@{
/**
* \brief Exchanges data items annotated by rank into <em>blocked</em> arrangement.
*
* \par
* - \smemreuse
*
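* \par Snippet
* A minimal usage sketch (illustrative only; the item values and the
* computation of \p thread_ranks are assumed placeholders):
* \code
* #include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
*
* // Allocate shared memory for BlockExchange
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* // Items and their destination ranks within the block-wide tile
* int thread_data[4];
* int thread_ranks[4];
* ...
*
* // Collectively scatter items by rank into a blocked arrangement
* BlockExchange(temp_storage).ScatterToBlocked(thread_data, thread_data, thread_ranks);
*
* \endcode
*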
* \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets
*/
template <typename OutputT, typename OffsetT>
__device__ __forceinline__ void ScatterToBlocked(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks
{
ScatterToBlocked(input_items, output_items, ranks, Int2Type<WARP_TIME_SLICING>());
}
/**
* \brief Exchanges data items annotated by rank into <em>striped</em> arrangement.
*
* \par
* - \smemreuse
*
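* \par Snippet
* A minimal usage sketch (illustrative only; the item values and the
* computation of \p thread_ranks are assumed placeholders):
* \code
* #include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
*
* // Allocate shared memory for BlockExchange
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* // Items and their destination ranks within the block-wide tile
* int thread_data[4];
* int thread_ranks[4];
* ...
*
* // Collectively scatter items by rank into a striped arrangement
* BlockExchange(temp_storage).ScatterToStriped(thread_data, thread_data, thread_ranks);
*
* \endcode
*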
* \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets
*/
template <typename OutputT, typename OffsetT>
__device__ __forceinline__ void ScatterToStriped(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks
{
ScatterToStriped(input_items, output_items, ranks, Int2Type<WARP_TIME_SLICING>());
}
/**
* \brief Exchanges data items annotated by rank into <em>striped</em> arrangement. Items with rank -1 are not exchanged.
*
* \par
* - \smemreuse
*
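* \par Snippet
* A minimal usage sketch (illustrative only; the item values and ranks are
* assumed placeholders). Items whose rank is -1 are simply not written to
* the exchange:
* \code
* #include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* // Ranks of -1 mark items that should not be exchanged
* int thread_data[4];
* int thread_ranks[4];
* ...
*
* BlockExchange(temp_storage).ScatterToStripedGuarded(thread_data, thread_data, thread_ranks);
*
* \endcode
*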
* \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets
*/
template <typename OutputT, typename OffsetT>
__device__ __forceinline__ void ScatterToStripedGuarded(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ranks[ITEM];
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
if (ranks[ITEM] >= 0)
temp_storage.buff[item_offset] = input_items[ITEM];
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid;
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
/**
* \brief Exchanges valid data items annotated by rank into <em>striped</em> arrangement.
*
* \par
* - \smemreuse
*
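* \par Snippet
* A minimal usage sketch (illustrative only; the item values, ranks, and
* validity flags are assumed to be computed elsewhere):
* \code
* #include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* typedef cub::BlockExchange<int, 128, 4> BlockExchange;
* __shared__ typename BlockExchange::TempStorage temp_storage;
*
* int thread_data[4];
* int thread_ranks[4];
* bool thread_valid[4];
* ...
*
* // Only items whose flag is set participate in the exchange
* BlockExchange(temp_storage).ScatterToStripedFlagged(thread_data, thread_data, thread_ranks, thread_valid);
*
* \endcode
*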
* \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets
* \tparam ValidFlag <b>[inferred]</b> FlagT type denoting which items are valid
*/
template <typename OutputT, typename OffsetT, typename ValidFlag>
__device__ __forceinline__ void ScatterToStripedFlagged(
InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks
ValidFlag is_valid[ITEMS_PER_THREAD]) ///< [in] Corresponding flag denoting item validity
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = ranks[ITEM];
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
if (is_valid[ITEM])
temp_storage.buff[item_offset] = input_items[ITEM];
}
CTA_SYNC();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid;
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
output_items[ITEM] = temp_storage.buff[item_offset];
}
}
//@} end member group
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
__device__ __forceinline__ void StripedToBlocked(
InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
{
StripedToBlocked(items, items);
}
__device__ __forceinline__ void BlockedToStriped(
InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
{
BlockedToStriped(items, items);
}
__device__ __forceinline__ void WarpStripedToBlocked(
InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
{
WarpStripedToBlocked(items, items);
}
__device__ __forceinline__ void BlockedToWarpStriped(
InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
{
BlockedToWarpStriped(items, items);
}
template <typename OffsetT>
__device__ __forceinline__ void ScatterToBlocked(
InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks
{
ScatterToBlocked(items, items, ranks);
}
template <typename OffsetT>
__device__ __forceinline__ void ScatterToStriped(
InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks
{
ScatterToStriped(items, items, ranks);
}
template <typename OffsetT>
__device__ __forceinline__ void ScatterToStripedGuarded(
InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks
{
ScatterToStripedGuarded(items, items, ranks);
}
template <typename OffsetT, typename ValidFlag>
__device__ __forceinline__ void ScatterToStripedFlagged(
InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements.
OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks
ValidFlag is_valid[ITEMS_PER_THREAD]) ///< [in] Corresponding flag denoting item validity
{
ScatterToStriped(items, items, ranks, is_valid);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <
typename T,
int ITEMS_PER_THREAD,
int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS,
int PTX_ARCH = CUB_PTX_ARCH>
class WarpExchange
{
private:
/******************************************************************************
* Constants
******************************************************************************/
/// Constants
enum
{
// Whether the logical warp size and the PTX warp size coincide
IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)),
WARP_ITEMS = (ITEMS_PER_THREAD * LOGICAL_WARP_THREADS) + 1,
LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(PTX_ARCH),
SMEM_BANKS = 1 << LOG_SMEM_BANKS,
// Insert padding if the number of items per thread is a power of two and > 4 (otherwise we can typically use 128b loads)
INSERT_PADDING = (ITEMS_PER_THREAD > 4) && (PowerOfTwo<ITEMS_PER_THREAD>::VALUE),
PADDING_ITEMS = (INSERT_PADDING) ? (WARP_ITEMS >> LOG_SMEM_BANKS) : 0,
};
/******************************************************************************
* Type definitions
******************************************************************************/
/// Shared memory storage layout type
struct _TempStorage
{
T buff[WARP_ITEMS + PADDING_ITEMS];
};
public:
/// \smemstorage{WarpExchange}
struct TempStorage : Uninitialized<_TempStorage> {};
private:
/******************************************************************************
* Thread fields
******************************************************************************/
_TempStorage &temp_storage;
int lane_id;
public:
/******************************************************************************
* Construction
******************************************************************************/
/// Constructor
__device__ __forceinline__ WarpExchange(
TempStorage &temp_storage)
:
temp_storage(temp_storage.Alias()),
lane_id(IS_ARCH_WARP ?
LaneId() :
LaneId() % LOGICAL_WARP_THREADS)
{}
/******************************************************************************
* Interface
******************************************************************************/
/**
* \brief Exchanges valid data items annotated by rank into <em>striped</em> arrangement.
*
* \par
* - \smemreuse
*
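* \par Snippet
* A minimal usage sketch (illustrative only; this class is internal and
* undocumented, so its interface may change). One TempStorage instance is
* needed per logical warp:
* \code
* // Specialize WarpExchange for the default 32-thread warp, 4 items per thread
* typedef cub::WarpExchange<int, 4> WarpExchange;
* __shared__ typename WarpExchange::TempStorage temp_storage;
*
* int items[4];
* int ranks[4];
* ...
*
* WarpExchange(temp_storage).ScatterToStriped(items, ranks);
* \endcode
*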
* \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets
*/
template <typename OffsetT>
__device__ __forceinline__ void ScatterToStriped(
T items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if (INSERT_PADDING) ranks[ITEM] = SHR_ADD(ranks[ITEM], LOG_SMEM_BANKS, ranks[ITEM]);
temp_storage.buff[ranks[ITEM]] = items[ITEM];
}
WARP_SYNC(0xffffffff);
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
int item_offset = (ITEM * LOGICAL_WARP_THREADS) + lane_id;
if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset);
items[ITEM] = temp_storage.buff[item_offset];
}
}
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)