file_path
stringlengths
32
153
content
stringlengths
0
3.14M
omniverse-code/kit/include/omni/graph/core/ConstBundlePrims.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// ====================================================================================================
/*
 _____          _   _       _     _    _
|  __ \        | \ | |     | |   | |  | |
| |  | | ___   |  \| | ___ | |_  | |  | |___  ___
| |  | |/ _ \  | . ` |/ _ \| __| | |  | / __|/ _ \
| |__| | (_) | | |\  | (_) | |_  | |__| \__ \  __/
|_____/ \___/  |_| \_|\___/ \__|  \____/|___/\___|

    This is a temporary interface that can change at any time.
*/
// ====================================================================================================

#include "BundleAttrib.h"

#include <omni/graph/core/IBundleFactory.h>

#include <unordered_map>
#include <memory>
#include <vector>

namespace omni
{
namespace graph
{
namespace core
{

class ConstBundlePrims;
class ConstBundlePrimIterator;
class ConstBundlePrimAttrIterator;

/**
 * Index used to identify primitives in a bundle.
 */
using BundlePrimIndex = size_t;

// Sentinel value (all bits set) meaning "no such primitive".
constexpr BundlePrimIndex kInvalidBundlePrimIndex = ~BundlePrimIndex(0);

/**
 * Collection of read-only attributes in a primitive.
 *
 * Const Bundle Primitive is not movable, not copyable. Its lifespan is managed by Const Bundle Primitives.
 */
class ConstBundlePrim
{
public:
    //! Maps attribute name to its cached accessor; owns the BundleAttrib instances.
    using BundleAttributeMap = std::unordered_map<NameToken, std::unique_ptr<BundleAttrib>>;
    //! Read-only iterator type over the cached attribute map.
    using AttrMapIteratorType = BundleAttributeMap::const_iterator;

    // Non-copyable and non-movable: instances are created and owned by ConstBundlePrims.
    ConstBundlePrim(ConstBundlePrim const&) = delete;
    ConstBundlePrim(ConstBundlePrim&&) = delete;
    ConstBundlePrim& operator=(ConstBundlePrim const& that) = delete;
    ConstBundlePrim& operator=(ConstBundlePrim&&) = delete;

    /**
     * @return Bundle handle of this primitive.
     */
    ConstBundleHandle getConstHandle() noexcept;

    /**
     * @return Parent bundle prims of this primitive.
     */
    ConstBundlePrims* getConstBundlePrims() noexcept;

    /**
     * @return Number of attributes in this primitive. Does not include internal attributes.
     */
    size_t attrCount() noexcept;

    /**
     * @return PrimAttribute if attribute with given name is found, nullptr otherwise.
     */
    BundleAttrib const* getConstAttr(NameToken attrName) noexcept;

    /**
     * @return Index of this primitive in parent bundle.
     */
    BundlePrimIndex primIndex() noexcept;

    /**
     * @return Path of this primitive.
     */
    NameToken path() noexcept;

    /**
     * @return Type of this primitive.
     */
    NameToken type() noexcept;

    [[deprecated("Dirty ID management has been moved to core. Use IBundleChanges.")]]
    DirtyIDType dirtyID() noexcept;

    /**
     * @return Attribute iterator pointing to the first attribute in this bundle.
     */
    ConstBundlePrimAttrIterator begin() noexcept;

    /**
     * @return Attribute iterator pointing to the last attribute in this bundle.
     */
    ConstBundlePrimAttrIterator end() noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     *       In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * @deprecated Do not use!. Use getConstAttr().
     */
    [[deprecated("Use non const instead.")]]
    BundleAttrib const* getAttr(NameToken attrName) const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of path().
     */
    [[deprecated("Use non const instead.")]]
    NameToken path() const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of type().
     */
    [[deprecated("Use non const instead.")]]
    NameToken type() const noexcept;

    [[deprecated("Dirty ID management has been moved to core. Use IBundleChanges.")]]
    DirtyIDType dirtyID() const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of begin().
     */
    [[deprecated("Use non const instead.")]]
    ConstBundlePrimAttrIterator begin() const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of end().
     */
    [[deprecated("Use non const instead.")]]
    ConstBundlePrimAttrIterator end() const noexcept;

protected:
    /**
     * Direct initialization with IConstBundle interface.
     *
     * ConstBundlePrim and BundlePrim take advantage of polymorphic relationship
     * between IConstBundle and IBundle interfaces.
     * In order to modify bundles, BundlePrim makes attempt to down cast IConstBundle
     * to IBundle interface. When this process is successful then, bundle can be modified.
     *
     * Only ConstBundlePrims is allowed to create instances of ConstBundlePrim.
     */
    ConstBundlePrim(ConstBundlePrims& bundlePrims, omni::core::ObjectPtr<IConstBundle2> bundle);

    /**
     * @return IConstBundle interface for this bundle primitive.
     */
    IConstBundle2* getConstBundlePtr() noexcept;

    /**
     * @return Get attribute used by ConstBundlePrims and BundlePrims.
     */
    BundleAttributeMap& getAttributes() noexcept;

    /**
     * Reads public attributes from the bundle and caches them as BundleAttribs.
     */
    void readAndCacheAttributes() noexcept;

private:
    ConstBundlePrims* m_bundlePrims{ nullptr }; // Parent of this bundle prim.
    omni::core::ObjectPtr<IConstBundle2> m_bundle;

    // Handles to internal bookkeeping attributes; start out invalid until resolved.
    ConstAttributeDataHandle m_primIndexAttr{ ConstAttributeDataHandle::invalidValue() };
    ConstAttributeDataHandle m_pathAttr{ ConstAttributeDataHandle::invalidValue() };
    ConstAttributeDataHandle m_typeAttr{ ConstAttributeDataHandle::invalidValue() };

    BundleAttributeMap m_attributes; // Cached public attributes that belong to this primitive.

    friend class BundleAttrib;     // Required to access IConstBundle interface.
    friend class BundlePrim;       // Required to access primitive type.
    friend class BundlePrims;      // Required to update internal indices.
    friend class ConstBundlePrims; // Required to call constructor.
};

/**
 * Collection of read-only primitives in a bundle.
 *
 * Const Bundle Primitives is not movable, not copyable. Its lifespan is managed by the user.
 */
class ConstBundlePrims
{
public:
    ConstBundlePrims();
    ConstBundlePrims(GraphContextObj const& context, ConstBundleHandle const& bundle);

    // Non-copyable and non-movable.
    ConstBundlePrims(ConstBundlePrims const&) = delete;
    ConstBundlePrims(ConstBundlePrims&&) = delete;
    ConstBundlePrims& operator=(ConstBundlePrims const&) = delete;
    ConstBundlePrims& operator=(ConstBundlePrims&&) = delete;

    /**
     * @return Bundle handle of this primitive.
     */
    ConstBundleHandle getConstHandle() noexcept;

    /**
     * @return Number of primitives in this bundle of primitives.
     */
    size_t getPrimCount() noexcept;

    /**
     * @return Get read only primitive under specified index.
     */
    ConstBundlePrim* getConstPrim(BundlePrimIndex primIndex) noexcept;

    [[deprecated("Dirty ID management has been moved to core. Use IBundleChanges.")]]
    DirtyIDType getBundleDirtyID() noexcept;

    /**
     * Common Attributes are attributes that are shared for entire bundle.
     * An example of a common attribute is "transform" attribute.
     *
     * @return ConstBundlePrims as ConstBundlePrim to access attributes.
     */
    ConstBundlePrim& getConstCommonAttrs() noexcept;

    /**
     * @return Context where bundle primitives belongs to.
     */
    GraphContextObj const& context() noexcept;

    /**
     * @return Primitive iterator pointing to the first primitive in this bundle.
     */
    ConstBundlePrimIterator begin() noexcept;

    /**
     * @return Primitive iterator pointing to the last primitive in this bundle.
     */
    ConstBundlePrimIterator end() noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     *       In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * @deprecated Do not use! Use getConstPrim().
     */
    ConstBundlePrim* getPrim(BundlePrimIndex primIndex) noexcept;

    [[deprecated("Getting next DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
    DirtyIDType getNextDirtyID() noexcept
    {
        return carb::getCachedInterface<IDirtyID>()->getNextDirtyID();
    }

    /**
     * @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
     *
     * @todo: There is no benefit of using this method. Cache has to be rebuilt from scratch
     *        whenever ConstBundlePrims is attached/detached.
     *        It would be better to remove default constructor and enforce cache construction
     *        through constructor with arguments.
     */
    void attach(GraphContextObj const& context, ConstBundleHandle const& bundle) noexcept;

    /**
     * @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
     */
    void detach() noexcept;

    /**
     * @deprecated Use getConstHandle.
     */
    ConstBundleHandle handle() noexcept;

    /**
     * @deprecated Use getConstCommonAttrs.
     */
    ConstBundlePrim& getCommonAttrs() noexcept;

    /**
     * @deprecated There is no need to separate attributes. Inherently IBundle2 interface keeps them separated.
     */
    void separateAttrs() noexcept;

    /**
     * @deprecated Caching attributes is not needed. Calling this method doesn't do anything.
     */
    void ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept;

protected:
    using ConstBundlePrimPtr = std::unique_ptr<ConstBundlePrim>;
    using BundlePrimArray = std::vector<ConstBundlePrimPtr>;

    /**
     * Get bundle primitives in this bundle.
     */
    BundlePrimArray& getPrimitives() noexcept;

    /**
     * IConstBundle2 is a polymorphic base for IBundle2, thus passing bundle argument allows passing
     * version of the interface that allows mutations.
     */
    void attach(omni::core::ObjectPtr<IBundleFactory>&& factory,
                omni::core::ObjectPtr<IConstBundle2>&& bundle) noexcept;

    /**
     * @return Factory to spawn instances of IBundle interface.
     */
    IBundleFactory* getBundleFactoryPtr() noexcept;

    /**
     * @return IBundle instance of this bundle.
     */
    IConstBundle2* getConstBundlePtr() noexcept;

    /**
     * Instances of BundlePrim are instantiated on demand. Argument create allows
     * instantiation mutable or immutable IConstBundle2 interface.
     */
    template <typename FUNC>
    ConstBundlePrim* getConstPrim(BundlePrimIndex primIndex, FUNC create) noexcept;

private:
    omni::core::ObjectPtr<IBundleFactory> m_factory;
    omni::core::ObjectPtr<IConstBundle2> m_bundle;

    GraphContextObj m_context; // Backward compatibility.

    /**
     * ConstBundlePrims is a bundle as well. To access attributes under this bundle we need to acquire
     * an instance of ConstBundlePrim for this bundle. Common attributes, with unfortunate name,
     * gives us ability to access those attributes.
     */
    ConstBundlePrimPtr m_commonAttributes;

    BundlePrimArray m_primitives; // Cached instances of BundlePrim.

    friend class ConstBundlePrim;
    friend class BundlePrim;
    friend class BundleAttrib;
};

/**
 * Primitives in Bundle iterator.
 */
class ConstBundlePrimIterator
{
public:
    ConstBundlePrimIterator(ConstBundlePrims& bundlePrims, BundlePrimIndex primIndex = 0) noexcept;
    ConstBundlePrimIterator(ConstBundlePrimIterator const& that) noexcept = default;
    ConstBundlePrimIterator& operator=(ConstBundlePrimIterator const& that) noexcept = default;

    bool operator==(ConstBundlePrimIterator const& that) const noexcept;
    bool operator!=(ConstBundlePrimIterator const& that) const noexcept;

    ConstBundlePrim& operator*() noexcept;
    ConstBundlePrim* operator->() noexcept;

    ConstBundlePrimIterator& operator++() noexcept;

private:
    ConstBundlePrims* m_bundlePrims;
    BundlePrimIndex m_primIndex;
};

/**
 * Attributes in Primitive iterator.
 */
class ConstBundlePrimAttrIterator
{
public:
    ConstBundlePrimAttrIterator(ConstBundlePrim& bundlePrim, ConstBundlePrim::AttrMapIteratorType attrIter) noexcept;
    ConstBundlePrimAttrIterator(ConstBundlePrimAttrIterator const& that) noexcept = default;
    ConstBundlePrimAttrIterator& operator=(ConstBundlePrimAttrIterator const& that) noexcept = default;

    bool operator==(ConstBundlePrimAttrIterator const& that) const noexcept;
    bool operator!=(ConstBundlePrimAttrIterator const& that) const noexcept;

    BundleAttrib const& operator*() const noexcept;
    BundleAttrib const* operator->() const noexcept;

    ConstBundlePrimAttrIterator& operator++() noexcept;

private:
    ConstBundlePrim* m_bundlePrim;
    ConstBundlePrim::AttrMapIteratorType m_attrIter;
};

} // namespace core
} // namespace graph
} // namespace omni

#include "ConstBundlePrimsImpl.h"
omniverse-code/kit/include/omni/graph/core/IBundleChanges.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "bundle/IBundleChanges1.h"
omniverse-code/kit/include/omni/graph/core/iAttributeData.h
// Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/Handle.h>
#include <omni/graph/core/Type.h>

namespace omni
{
namespace graph
{
namespace core
{

using RawPtr = uint8_t*;            //!< Type for casting byte arrays to actual values
using ConstRawPtr = uint8_t const*; //!< Type for casting const byte arrays to actual values

// ======================================================================
/** Interface to data belonging to a specific attribute.
 *
 * NOTE(review): This is a Carbonite ABI function-pointer table — member order and
 * signatures are part of the plugin ABI and must not be changed; new members are
 * appended only (see STRUCT_INTEGRITY_CHECK at the bottom).
 */
struct IAttributeData
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::IAttributeData", 1, 6);

    /**
     * Gets the name of the attribute containing this attribute data.
     *
     * The attribute data handle may refer to attribute data from an attribute that is upstream
     * of the attribute from which the handle was retrieved, in which case, this will
     * return the upstream attribute's name.
     *
     * @param[in] contextObj The GraphContextObj containing the attribute data
     * @param[in] handle     The handle to the attribute whose name is being requested
     * @return A NameToken representing the attribute's name, for which the text can be retrieved
     *         using IToken::getText
     */
    NameToken (CARB_ABI* getName)(const GraphContextObj& contextObj, ConstAttributeDataHandle handle);

    /**
     * Gets the name of the type of this attribute data. Use getType to get a representation of
     * the type that is easier to interpret in code.
     *
     * The actual attribute data may have a different type than an associated attribute on a node,
     * for example, if the upstream source of the data has a different type.
     * This function returns the type of the data, not the type of the associated attribute.
     *
     * @param[in] contextObj The GraphContextObj containing the attribute data
     * @param[in] handle     The handle to the attribute data whose type name is being requested
     * @return A NameToken representing the attribute data's type name, for which the text can be retrieved
     *         using IToken::getText
     */
    NameToken (CARB_ABI* getTypeName)(const GraphContextObj& contextObj, ConstAttributeDataHandle handle);

    /**
     * Gets the type of this attribute data in a representation that is easily interpreted by code.
     *
     * The actual attribute data may have a different type than an associated attribute on a node,
     * for example, if the upstream source of the data has a different type.
     * This function returns the type of the data, not the type of the associated attribute.
     *
     * @param[in] contextObj The GraphContextObj containing the attribute data
     * @param[in] handle     The handle to the attribute data whose type is being requested
     * @return A Type structure representing the attribute data's type, whose members provide
     *         information about the type
     */
    Type (CARB_ABI* getType)(const GraphContextObj& contextObj, ConstAttributeDataHandle handle);

    /**
     * Checks whether the type of this attribute data is an array type, i.e. array depth of 1
     * (array) or 2 (array of arrays; not yet supported).
     *
     * @param[in] contextObj The GraphContextObj containing the attribute data
     * @param[in] handle     The handle to the attribute data to check
     * @return true if the attribute data is an array type, else false
     */
    bool isArray(const GraphContextObj& contextObj, ConstAttributeDataHandle handle)
    {
        // Convenience wrapper: implemented inline on top of the ABI getType call.
        return (*getType)(contextObj, handle).arrayDepth != 0;
    }

    /** deprecated function, do not use */
    void (CARB_ABI* deprecated_0)(const void**, const GraphContextObj&, const ConstAttributeDataHandle*, size_t);

    /**
     * Gets GPU pointers to the read-only GPU data of some number of attributes.
     *
     * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection.
     * For example, after requesting int attribute data, attrsOut will effectively be of type
     * const int** upon returning, but after requesting int array attribute data, attrsOut will effectively be
     * of type const int* const**.
     *
     * If the attribute data is not on the GPU at the time of this call, but is on the CPU,
     * it will be copied to the GPU before returning.
     *
     * Deprecated: Use getDataRGpuAt
     *
     * @param[out] attrsOut    Array to be filled in with pointers to data of each attribute
     * @param[in]  contextObj  The GraphContextObj containing the attribute data
     * @param[in]  attrHandles Array of attribute data handles whose data are being requested
     * @param[in]  attrCount   Number of attributes whose data are being requested
     */
    void (CARB_ABI* getDataRGPU)(const void** attrsOut,
                                 const GraphContextObj& contextObj,
                                 const ConstAttributeDataHandle* attrHandles,
                                 size_t attrCount);

    /**
     * Gets CPU pointers to the writable CPU data of some number of attributes.
     *
     * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection.
     * For example, after requesting int attribute data, attrsOut will effectively be of type
     * int** upon returning, but after requesting int array attribute data, attrsOut will effectively be
     * of type int*const**.
     *
     * If the attribute data is not on the CPU at the time of this call, but is on the GPU,
     * it will be copied to the CPU and invalidated on the GPU before returning.
     *
     * @param[out] attrsOut    Array to be filled in with pointers to data of each attribute
     * @param[in]  contextObj  The GraphContextObj containing the attribute data
     * @param[in]  attrHandles Array of attribute data handles whose data are being requested
     * @param[in]  attrCount   Number of attributes whose data are being requested
     */
    void (CARB_ABI* getDataW)(void** attrsOut,
                              const GraphContextObj& contextObj,
                              const AttributeDataHandle* attrHandles,
                              size_t attrCount);

    /**
     * Gets GPU pointers to the writable GPU data of some number of attributes.
     *
     * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection.
     * For example, after requesting int attribute data, attrsOut will effectively be of type
     * int** upon returning, but after requesting int array attribute data, attrsOut will effectively be
     * of type int*const**.
     *
     * If the attribute data is not on the GPU at the time of this call, but is on the CPU,
     * it will be copied to the GPU and invalidated on the CPU before returning.
     *
     * Deprecated: Use getDataWGpuAt
     *
     * @param[out] attrsOut    Array to be filled in with pointers to data of each attribute
     * @param[in]  contextObj  The GraphContextObj containing the attribute data
     * @param[in]  attrHandles Array of attribute data handles whose data are being requested
     * @param[in]  attrCount   Number of attributes whose data are being requested
     */
    void (CARB_ABI* getDataWGPU)(void** attrsOut,
                                 const GraphContextObj& contextObj,
                                 const AttributeDataHandle* attrHandles,
                                 size_t attrCount);

    /** Deprecated function, do not use */
    void (CARB_ABI* deprecated_1)(const GraphContextObj&, const AttributeDataHandle*, size_t);

    /**
     * Gets the number of array elements in each of the specified attributes.
     *
     * Any invalid attributes will considered to have 0 elements. Attributes that are not
     * arrays will be considered to have 1 element. Array of array attributes are not
     * yet supported.
     *
     * @param[out] countOut    Array to be filled in with number of elements in each attribute
     * @param[in]  contextObj  The GraphContextObj containing the attribute data
     * @param[in]  attrHandles Array of attribute data handles whose element counts are being requested
     * @param[in]  attrCount   Number of attributes whose element counts are being requested
     */
    void (CARB_ABI* getElementCount)(size_t* countOut,
                                     const GraphContextObj& contextObj,
                                     const ConstAttributeDataHandle* attrHandles,
                                     size_t attrCount);

    /**
     * Sets the number of array elements in the specified array attribute.
     *
     * The array attribute's data will not be resized until a pointer to its data is requested.
     *
     * @param[in] contextObj The GraphContextObj containing the attribute data
     * @param[in] handle     Attribute data handle referring to an array attribute
     * @param[in] count      Element count to which the array attribute data should be resized.
     */
    void (CARB_ABI* setElementCount)(const GraphContextObj& contextObj, AttributeDataHandle handle, size_t count);

    /**
     * Copies the data from an existing attribute data value into this one.
     * As only data is being copied a name for the destination is not required and will remain unchanged.
     *
     * @param[in] destination Data location to be overwritten
     * @param[in] contextObj  Location of both sets of attribute data
     * @param[in] source      Data being copied
     */
    void (CARB_ABI* copyData)(AttributeDataHandle destination,
                              const GraphContextObj& contextObj,
                              ConstAttributeDataHandle source);

    /**
     * Get the location and total number of bytes occupied by the readable attribute data on the CPU.
     *
     * If the data is not currently valid on the CPU the pointer returned will be nullptr.
     *
     * @param[in]  contextObj Location of both sets of attribute data
     * @param[in]  attrHandle Handle to the attribute whose size is to be returned
     * @param[out] refToData  Resulting pointer to the attribute data
     * @param[out] refToSize  Size of the data being pointed at
     */
    void (CARB_ABI* getDataReferenceR)(const ConstAttributeDataHandle attrHandle,
                                       const GraphContextObj& contextObj,
                                       ConstRawPtr& refToData,
                                       size_t& refToSize);

    /**
     * Get the location and total number of bytes occupied by the readable attribute data on the GPU.
     *
     * If the data is not currently valid on the GPU the pointer returned will be nullptr.
     * If it is then it will point to GPU memory, and should not be dereferenced on the CPU side.
     *
     * Deprecated: Use getDataReferenceRGpuAt
     *
     * @param[in]  contextObj Location of both sets of attribute data
     * @param[in]  attrHandle Handle to the attribute whose size is to be returned
     * @param[out] refToData  Resulting pointer to the attribute data
     * @param[out] refToSize  Size of the data being pointed at
     */
    void (CARB_ABI* getDataReferenceRGpu)(const ConstAttributeDataHandle attrHandle,
                                          const GraphContextObj& contextObj,
                                          ConstRawPtr& refToData,
                                          size_t& refToSize);

    /**
     * Get the location and total number of bytes occupied by the writable attribute data on the CPU.
     *
     * If the data is not currently valid on the CPU the pointer returned will be nullptr.
     *
     * @param[in]  contextObj Location of both sets of attribute data
     * @param[in]  attrHandle Handle to the attribute whose size is to be returned
     * @param[out] refToData  Resulting pointer to the attribute data
     * @param[out] refToSize  Size of the data being pointed at
     */
    void (CARB_ABI* getDataReferenceW)(const AttributeDataHandle attrHandle,
                                       const GraphContextObj& contextObj,
                                       RawPtr& refToData,
                                       size_t& refToSize);

    /**
     * Get the location and total number of bytes occupied by the writable attribute data on the GPU.
     *
     * If the data is not currently valid on the GPU the pointer returned will be nullptr.
     * If it is then it will point to GPU memory, and should not be dereferenced on the CPU side.
     *
     * Deprecated: Use getDataReferenceWGpuAt
     *
     * @param[in]  contextObj Location of both sets of attribute data
     * @param[in]  attrHandle Handle to the attribute whose size is to be returned
     * @param[out] refToData  Resulting pointer to the attribute data
     * @param[out] refToSize  Size of the data being pointed at
     */
    void (CARB_ABI* getDataReferenceWGpu)(const AttributeDataHandle attrHandle,
                                          const GraphContextObj& contextObj,
                                          RawPtr& refToData,
                                          size_t& refToSize);

    /**
     * Check if the cpu data of the given attribute is currently valid
     *
     * @param[in] contextObj Location of both sets of attribute data
     * @param[in] attrHandle Handle to the attribute
     */
    bool (CARB_ABI* cpuValid)(const ConstAttributeDataHandle attrHandle, const GraphContextObj& contextObj);

    /**
     * Check if the gpu data of the given attribute is currently valid
     *
     * @param[in] contextObj Location of both sets of attribute data
     * @param[in] attrHandle Handle to the attribute
     */
    bool (CARB_ABI* gpuValid)(const ConstAttributeDataHandle attrHandle, const GraphContextObj& contextObj);

    /**
     * Gets GPU pointers to the read-only GPU data of some number of attributes.
     *
     * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection.
     * For example, after requesting int attribute data, attrsOut will effectively be of type
     * const int** upon returning, but after requesting int array attribute data, attrsOut will effectively be
     * of type const int* const**.
     *
     * If the attribute data is not on the GPU at the time of this call, but is on the CPU,
     * it will be copied to the GPU before returning.
     *
     * @param[out] attrsOut    Array to be filled in with pointers to data of each attribute
     * @param[in]  contextObj  The GraphContextObj containing the attribute data
     * @param[in]  attrHandles Array of attribute data handles whose data are being requested
     * @param[in]  attrCount   Number of attributes whose data are being requested
     * @param[in]  whereGpuPtrs For array data, the location of the array pointer - either on the CPU or on the GPU
     */
    void (CARB_ABI* getDataRGpuAt)(const void** attrsOut,
                                   const GraphContextObj& contextObj,
                                   const ConstAttributeDataHandle* attrHandles,
                                   size_t attrCount,
                                   omni::fabric::PtrToPtrKind whereGpuPtrs);

    /**
     * Gets GPU pointers to the writable GPU data of some number of attributes.
     *
     * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection.
     * For example, after requesting int attribute data, attrsOut will effectively be of type
     * int** upon returning, but after requesting int array attribute data, attrsOut will effectively be
     * of type int*const**.
     *
     * If the attribute data is not on the GPU at the time of this call, but is on the CPU,
     * it will be copied to the GPU and invalidated on the CPU before returning.
     *
     * @param[out] attrsOut    Array to be filled in with pointers to data of each attribute
     * @param[in]  contextObj  The GraphContextObj containing the attribute data
     * @param[in]  attrHandles Array of attribute data handles whose data are being requested
     * @param[in]  attrCount   Number of attributes whose data are being requested
     * @param[in]  whereGpuPtrs For array data, the location of the array pointer - either on the CPU or on the GPU
     */
    void (CARB_ABI* getDataWGpuAt)(void** attrsOut,
                                   const GraphContextObj& contextObj,
                                   const AttributeDataHandle* attrHandles,
                                   size_t attrCount,
                                   omni::fabric::PtrToPtrKind whereGpuPtrs);

    /**
     * Get the location and total number of bytes occupied by the readable attribute data on the GPU.
     *
     * If the data is not currently valid on the GPU the pointer returned will be nullptr.
     * If it is then it will point to GPU memory, and should not be dereferenced on the CPU side.
     *
     * @param[in]  attrHandle   Handle to the attribute whose size is to be returned
     * @param[in]  contextObj   Location of both sets of attribute data
     * @param[in]  whereGpuPtrs For array data, the location of the array pointer - either on the CPU or on the GPU
     * @param[out] refToData    Resulting pointer to the attribute data
     * @param[out] refToSize    Size of the data being pointed at
     */
    void (CARB_ABI* getDataReferenceRGpuAt)(const ConstAttributeDataHandle attrHandle,
                                            const GraphContextObj& contextObj,
                                            omni::fabric::PtrToPtrKind whereGpuPtrs,
                                            ConstRawPtr& refToData,
                                            size_t& refToSize);

    /**
     * Get the location and total number of bytes occupied by the writable attribute data on the GPU.
     *
     * If the data is not currently valid on the GPU the pointer returned will be nullptr.
     * If it is then it will point to GPU memory, and should not be dereferenced on the CPU side.
     *
     * @param[in]  attrHandle   Handle to the attribute whose size is to be returned
     * @param[in]  contextObj   Location of both sets of attribute data
     * @param[in]  whereGpuPtrs For array data, the location of the array pointer - either on the CPU or on the GPU
     * @param[out] refToData    Resulting pointer to the attribute data
     * @param[out] refToSize    Size of the data being pointed at
     */
    void (CARB_ABI* getDataReferenceWGpuAt)(AttributeDataHandle attrHandle,
                                            const GraphContextObj& contextObj,
                                            omni::fabric::PtrToPtrKind whereGpuPtrs,
                                            RawPtr& refToData,
                                            size_t& refToSize);

    /**
     * Perform a conversion between 2 data types
     *
     * The actual attribute data may have a different type than an associated attribute on a node,
     * for example, if the upstream source of the data has a different type.
     * This function can be used to perform the conversion from the actual attribute data,
     * to the provided buffer of the type of the attribute
     *
     * @param[out] dstDataOut A pointer to the destination buffer to be filled with the result of the conversion
     * @param[in]  dstType    The type the destination buffer
     * @param[in]  srcDataIn  A pointer to the actual attribute data
     * @param[in]  srcType    The type of the attribute data pointer
     * @return True if a conversion exists and succeeded, False otherwise.
     */
    bool (CARB_ABI* performConversion)(void* dstDataOut, Type dstType, void* srcDataIn, Type srcType);

    /**
     * Gets CPU pointers to the read-only CPU data of some number of attributes.
     *
     * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection.
     * For example, after requesting int attribute data, attrsOut will effectively be of type
     * const int** upon returning, but after requesting int array attribute data, attrsOut will effectively be
     * of type const int* const**.
     *
     * If the attribute data is not on the CPU at the time of this call, but is on the GPU,
     * it will be copied to the CPU before returning.
     *
     * @param[out] attrsOut    Array to be filled in with pointers to data of each attribute
     * @param[in]  contextObj  The GraphContextObj containing the attribute data
     * @param[in]  attrHandles Array of attribute data handles whose data are being requested
     * @param[in]  attrCount   Number of attributes whose data are being requested
     */
    void (CARB_ABI* getDataR)(const void** attrsOut,
                              const GraphContextObj& contextObj,
                              const ConstAttributeDataHandle* attrHandles,
                              size_t attrCount);

    /**
     * In a vectorized context, retrieve the write handle to another instance from a given one
     *
     * @param[in] contextObj Location of attribute data
     * @param[in] attrHandle The source handle to offset
     * @param[in] offset     An offset to apply to the provided handle. Can be negative.
     * @return A handle to the instance located at the provided offset relative to the provided handle
     */
    AttributeDataHandle (CARB_ABI* moveToAnotherInstanceW)(const GraphContextObj& contextObj,
                                                           AttributeDataHandle attrHandle,
                                                           int offset);

    /**
     * In a vectorized context, retrieve the read handle to another instance from a given one
     *
     * @param[in] contextObj Location of attribute data
     * @param[in] attrHandle The source handle to offset
     * @param[in] offset     An offset to apply to the provided handle. Can be negative.
     * @return A handle to the instance located at the provided offset relative to the provided handle
     */
    ConstAttributeDataHandle (CARB_ABI* moveToAnotherInstanceR)(const GraphContextObj& contextObj,
                                                                ConstAttributeDataHandle attrHandle,
                                                                int offset);
};

// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IAttributeData, moveToAnotherInstanceR, 24)

} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/SimpleAttribute.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/CppWrappers.h> #include <omni/graph/core/ogn/TypeConversion.h> #include <omni/graph/core/ogn/Types.h> // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // SimpleInput Read-only wrapper for simple (POD and tuple) attribute values on CPU or GPU // SimpleOutput Writable data wrapper for simple (POD and tuple) attribute values on CPU or GPU // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= namespace omni { namespace graph { namespace core { namespace ogn { // ====================================================================== /** * Wrapper template that handles POD input attribute values. * Its unowned data points to the real data in the fabric. It provides * a consistent interface to the data with a isValid() method and an operator(), * as well as enforcing const-correctness with the values. * * Rather than split this into CPU and GPU versions some key methods are enabled by template based * on the template parameter. This avoids duplication of the common code and reduces clutter. 
 *
 * The data in this class is owned by the OGN generated code, this class only maintains pointers to those references.
 * By doing this, those generated references can be updated from fabric and this class will automatically pick up
 * the change, avoiding the need for synchronization code.
 */
template <typename DataType, eAttributeType AttributeType, eMemoryType MemoryType = kCpu>
struct SimpleAttribute
{
    // Make const-ness aware at compile time so that this class be used in const and non-const contexts
    static constexpr bool readOnly = (AttributeType == ogn::kOgnInput);
    // Element type as stored in fabric (const-qualified for inputs)
    using data_t = DataType;
    // Inputs use the read-only handle type; outputs/state use the writable one
    using handle_t = std::conditional_t<readOnly, ConstAttributeDataHandle, AttributeDataHandle>;

    /**
     * Set up the accessor for input attributes with simple data
     *
     * @param[in] offset: A reference to the instance offset currently pointed by owning database
     * @param[in] role Attribute's role
     */
    SimpleAttribute(size_t const& offset, AttributeRole role = AttributeRole::eNone) : m_role(role), m_offset(offset)
    {
        static_assert(readOnly == std::is_const<DataType>::value, "Cannot construct input attributes from non-const types");
    }

    /**
     * Set up the accessor for input attributes with simple data where the data pointer is known at construction time
     *
     * @param[in] dataPtr Pointer to the attribute's data
     * @param[in] role Attribute's role
     */
    SimpleAttribute(size_t const& offset, data_t* dataPtr, AttributeRole role = AttributeRole::eNone)
        : m_ptrToData{ dataPtr }, m_role(role), m_offset(offset)
    {}

    /**
     * Fetch (and cache) the CPU data pointer from fabric, then index into it.
     *
     * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database
     * @return Reference to the raw fabric data extracted for use on the CPU
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCuda>::type>
    const DataType& cpu(size_t idx = 0) const
    {
        // Cast is necessary to generically handle both const and non-const internal data
        m_ptrToData = (data_t*)getDataR<DataType>(*m_context, m_handle);
        return m_ptrToData[m_offset+idx];
    }

    /**
     * Fetch (and cache) the GPU data pointer from fabric, then offset into it.
     *
     * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database
     * @return Pointer to the raw fabric data extracted for use on the GPU
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCpu>::type>
    const DataType* gpu(size_t idx = 0) const
    {
        // Cast is necessary to generically handle both const and non-const internal data
        m_ptrToData = (data_t*)getDataRGPU<DataType>(*m_context, m_handle);
        return m_ptrToData + m_offset + idx;
    }

    /**
     * Set the context. This is done to allow the same wrapper class to be used for
     * multiple evaluations in different contexts.
     *
     * @param[in] context The graph context to which the attribute belongs
     */
    void setContext(const GraphContextObj& context)
    {
        m_context = &context;
        if (m_ptrToData)
        {
            // Propagate the context to the cached data when the wrapped type supports it
            OptionalMethod::setContext<data_t>(*m_ptrToData, context);
        }
    }

    /**
     * Set the attribute handle. This is done to allow the same wrapper class to be used for
     * multiple evaluations in different contexts.
     *
     * @param[in] handle Handle to the attribute to which the attribute belongs
     */
    void setHandle(handle_t handle)
    {
        m_handle = handle;
    }

    /**
     * @return Role of the managed attribute
     */
    AttributeRole role() const
    {
        return m_role;
    }

    /**
     * @return True if the underlying attribute data is valid for accessing
     */
    template <eMemoryType Type = MemoryType>
    bool isValid() const
    {
        return m_handle.isValid();
    }

protected:
    mutable data_t* m_ptrToData{ nullptr }; //!< Cached pointer to fabric data
    AttributeRole m_role{ AttributeRole::eNone }; //!< Role interpretation for the attribute this struct manages
    const GraphContextObj* m_context{ nullptr }; //!< ABI OmniGraph object, for JIT access to data
    handle_t m_handle{ handle_t::invalidValue() }; //!< Handle to this attribute's data, for JIT access to data
    mutable Type m_originalDataType{ BaseDataType::eUnknown }; //!< The actual type of the underlying data in fabric
                                                               //!< (used for auto conversion)
    size_t const& m_offset; //!< An offset (in terms of object count) to apply to the pointer to access the object
};

// ======================================================================
/**
 * Wrapper template that handles POD input attribute values.
*/ template <typename DataType, eMemoryType MemoryType = kCpu> struct SimpleInput : public SimpleAttribute<std::add_const_t<DataType>, kOgnInput, MemoryType> { using parent_t = SimpleAttribute<DataType, kOgnInput, MemoryType>; using data_t = typename parent_t::data_t; using handle_t = typename parent_t::handle_t; /** * Set up the accessor for input attributes with simple data * * @param[in] offset: A reference to the instance offset currently pointed by owning database * @param[in] role: Attribute's role */ SimpleInput(size_t const& offset, AttributeRole role = AttributeRole::eNone) : parent_t(offset, role) { } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Reference to the raw fabric data; only enabled when it lives on the CPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type> const DataType& operator()(size_t idx = 0) const { auto& data = this->m_ptrToData == nullptr ? this->template cpu<Type>(idx) : this->m_ptrToData[idx+this->m_offset]; if (this->m_originalDataType.baseType == BaseDataType::eUnknown) { if (this->m_context) this->m_originalDataType = this->m_context->iAttributeData->getType(*this->m_context, this->m_handle); else return data; } return *converter.convertValue(&data, this->m_originalDataType); } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Pointer to the raw flatcache data (or the converted value); only enabled when it lives on the GPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCuda>::type> const DataType* operator()(size_t idx = 0) const { if (!this->m_context->iAttributeData->gpuValid(this->m_handle, *this->m_context)) this->m_ptrToData = nullptr; return this->m_ptrToData == nullptr ? 
this->template gpu<Type>(idx) : (this->m_ptrToData + idx + this->m_offset); } /** * @return True if the attribute can be accessed for vectorized compute */ bool const canVectorize() const { if (this->m_originalDataType.baseType == BaseDataType::eUnknown) { if (this->m_context) this->m_originalDataType = this->m_context->iAttributeData->getType(*this->m_context, this->m_handle); else return false; } return converter.willConvert(this->m_originalDataType) == false; } /** * @param[in] count: The number of instances available for vectorized access * @return A span for the vectorized range if available. If not available, user must call operator() in a loop with incremented indices instead */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type> gsl::span<DataType const> vectorized(size_t count) const { auto& data = this->m_ptrToData == nullptr ? this->template cpu<Type>() : this->m_ptrToData[this->m_offset]; if (this->m_originalDataType.baseType == BaseDataType::eUnknown) { if (this->m_context) this->m_originalDataType = this->m_context->iAttributeData->getType(*this->m_context, this->m_handle); else return { &data, count }; } if (converter.willConvert(this->m_originalDataType)) { if (count != 1) return {}; return { converter.convertValue(&data, this->m_originalDataType), count /*= 1*/ }; } return { &data, count }; } /** * @param[in] index: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return A copy of the underlying ABI data handle for the attribute */ handle_t abi_handle(size_t index = 0) const { size_t const idx = this->m_offset + index; return idx == 0 ? this->m_handle : this->m_context->iAttributeData->moveToAnotherInstanceR(*this->m_context, this->m_handle, (int)idx); } private: Converter<DataType> converter; }; // ====================================================================== /** * Wrapper template that handles POD output attribute values. 
 * It adds methods that provide write access to the underlying attribute data onto the functionality of SimpleInput.
 */
template <typename DataType, eAttributeType AttributeType, eMemoryType MemoryType = kCpu>
struct SimpleWritableAttribute : public SimpleAttribute<DataType, AttributeType, MemoryType>
{
    using parent_t = SimpleAttribute<DataType, AttributeType, MemoryType>;
    using data_t = typename parent_t::data_t;
    using handle_t = typename parent_t::handle_t;

    /**
     * Set up the accessor for output attributes with simple data
     *
     * @param[in] offset: A reference to the instance offset currently pointed by owning database
     * @param[in] role: Attribute's role
     */
    SimpleWritableAttribute(size_t const& offset, AttributeRole role = AttributeRole::eNone) : parent_t(offset, role)
    {
    }

    /**
     * Query if the attribute can be accessed in a vectorized manner
     * @return True: SimpleWritableAttribute can always vectorize
     */
    bool const canVectorize() const
    {
        // Writable attributes never go through type conversion, so vectorization is always possible
        return true;
    }

    /**
     * @param[in] count: The number of instances available for vectorized access
     * @return A span for the vectorized range
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type>
    gsl::span<DataType> vectorized(size_t count)
    {
        if (this->m_ptrToData)
            return { this->m_ptrToData + this->m_offset, count };
        // No cached pointer yet: fetch the data from fabric first
        return { &cpu(), count };
    }

    /**
     * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database
     * @return Reference to the raw fabric data; only enabled when it lives on the CPU
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type>
    DataType& operator()(size_t idx = 0) const
    {
        if (this->m_ptrToData)
            return this->m_ptrToData[idx+this->m_offset];
        return cpu(idx);
    }

    /**
     * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database
     * @return Pointer to the raw fabric data; only enabled when it lives on the GPU
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCuda>::type>
    DataType* operator()(size_t idx = 0) const
    {
        // Invalidate the cache when the GPU copy is stale so the data gets re-fetched
        if (!this->m_context->iAttributeData->gpuValid(this->m_handle, *this->m_context))
            this->m_ptrToData = nullptr;
        if (this->m_ptrToData)
            return this->m_ptrToData + idx + this->m_offset;
        return gpu(idx);
    }

    /**
     * Fetch (and cache) the writable CPU data pointer from fabric, then index into it.
     *
     * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database
     * @return Reference to the raw fabric data extracted for use on the CPU
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCuda>::type>
    DataType& cpu(size_t idx = 0) const
    {
        this->m_ptrToData = getDataW<DataType>(*this->m_context, this->m_handle);
        return this->m_ptrToData[idx+this->m_offset];
    }

    /**
     * Fetch (and cache) the writable GPU data pointer from fabric, then offset into it.
     *
     * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database
     * @return Pointer to the raw fabric data extracted for use on the GPU
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCpu>::type>
    DataType* gpu(size_t idx = 0) const
    {
        this->m_ptrToData = getDataWGPU<DataType>(*this->m_context, this->m_handle);
        return this->m_ptrToData + idx + this->m_offset;
    }

    /**
     * @param[in] index: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database
     * @return A copy of the underlying ABI data handle for the attribute
     */
    handle_t abi_handle(size_t index = 0) const
    {
        size_t const idx = this->m_offset + index;
        return idx == 0 ?
            this->m_handle :
            this->m_context->iAttributeData->moveToAnotherInstanceW(*this->m_context, this->m_handle, (int)idx);
    }
};

// Typedefs to differentiate state from output types
template <typename DataType, eMemoryType MemoryType = kCpu>
using SimpleOutput = SimpleWritableAttribute<DataType, kOgnOutput, MemoryType>;
template <typename DataType, eMemoryType MemoryType = kCpu>
using SimpleState = SimpleWritableAttribute<DataType, kOgnState, MemoryType>;

// Backward compatibility for previously existing data types
template <typename DataType>
using DualInput = SimpleInput<DataType, kAny>;
template <typename DataType>
using DualOutput = SimpleOutput<DataType, kAny>;

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/ArrayAttribute.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // ArrayInput Read-only wrapper for attributes that are arrays of values on CPU or GPU // ArrayOutput Writable wrapper for attributes that are arrays of values on CPU or GPU // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/CppWrappers.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/ogn/array.h> #include <omni/graph/core/ogn/string.h> #include <carb/InterfaceUtils.h> #include <omni/fabric/Enums.h> using omni::fabric::PtrToPtrKind; namespace omni { namespace graph { namespace core { namespace ogn { // ============================================================================================================== /** House the shared data types that will be used by all array type accessors. * Provides typedefs for data access with similar but subtly different types, e.g. const versus non-const * Its unowned data points to the real data in the fabric. 
 * It provides a consistent interface to the data,
 * with an isValid() method to use for compute validation and an appropriate operator() for data extraction.
 */
template <typename DataType, eAttributeType AttributeType, eMemoryType MemoryType = kCpu, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct ArrayAttribute
{
    // Make const-ness aware at compile time so that this class be used in const and non-const contexts
    static constexpr bool readOnly = (AttributeType == ogn::kOgnInput);
    using this_t = ArrayAttribute<DataType, AttributeType, MemoryType, GpuPtrType>;
    using data_t = DataType;
    // Inputs wrap the data in a read-only array/string view; outputs in a writable one.
    // char element types map onto the dedicated string wrappers.
    using array_t = std::conditional_t<
        readOnly,
        std::conditional_t<std::is_same<const char, DataType>::value, const_string, const_array<std::remove_const_t<DataType>>>,
        std::conditional_t<std::is_same<char, DataType>::value, string, array<DataType>>
    >;
    using handle_t = std::conditional_t<readOnly, ConstAttributeDataHandle, AttributeDataHandle>;

    /**
     * Set up the accessor for attributes with array data
     *
     * @param[in] offset: A reference to the instance offset currently pointed by owning database
     * @param[in] role Attribute's role
     */
    ArrayAttribute(size_t const& offset, AttributeRole role = AttributeRole::eNone)
        : m_role(role), m_offset(offset), m_currentOffset(offset)
    {
        static_assert(readOnly == std::is_const<DataType>::value, "Cannot construct input attributes from non-const types");
    }

    /**
     * @return Role of the managed attribute
     */
    AttributeRole role() const
    {
        return m_role;
    }

    /**
     * Set the context. This is done to allow the same wrapper class to be used for
     * multiple evaluations in different contexts.
     *
     * @param[in] context The graph context to which the array belongs
     */
    void setContext(const GraphContextObj& context)
    {
        m_arrayData.setContext(context);
    }

    /**
     * Set the attribute handle. This is done to allow the same wrapper class to be used for
     * multiple evaluations in different contexts.
     *
     * @param[in] handle Handle to the attribute to which the array belongs
     */
    void setHandle(handle_t handle)
    {
        m_arrayData.setHandle(handle);
    }

    /**
     * Perform any action necessary before computation happens
     */
    void preCompute()
    {
        //array data needs to be re-fetched every frame,
        // as any external change of the value might have invalidated the pointers
        m_arrayData.setDirty();
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return Reference to an array wrapper around the raw fabric CPU data
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type>
    const array_t& operator()(size_t index = 0) const
    {
        return cpu(index);
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return Reference to an array wrapper around the raw fabric GPU data
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCuda>::type>
    const DataType** operator()(size_t index = 0) const
    {
        return gpu(index);
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return Reference to an array wrapper around the raw fabric CPU data
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCuda>::type>
    const array_t& cpu(size_t index = 0) const
    {
        adjustHandle(index);
        return m_arrayData;
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return Reference to GPU data
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCpu>::type>
    auto gpu(size_t index = 0) const
    {
        return gpuGet(abi_handle(index), context());
    }

    /**
     * Query the size of the underlying array
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return the size of the underlying array
     */
    const size_t size(size_t index = 0) const
    {
        size_t count = 0;
        ConstAttributeDataHandle chdl = abi_handle(index);
        context()->iAttributeData->getElementCount(&count, *context(), &chdl, 1);
        return count;
    }

    /**
     * @return True if the underlying attribute data is valid for accessing
     */
    bool isValid() const
    {
        return m_arrayData.isValid();
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return A copy of the underlying ABI data handle for the attribute
     */
    handle_t abi_handle(size_t index = 0) const
    {
        adjustHandle(index);
        return m_arrayData.m_handle;
    }

    //@deprecated use abi_handle instead
    [[deprecated("Calling handle() is deprecated. Use abi_handle() instead")]]
    inline handle_t handle(size_t index = 0) const { return abi_handle(index);}

    /**
     * @return A copy of the underlying ABI data handle for the attribute
     */
    GraphContextObj const* context() const
    {
        return m_arrayData.m_context;
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @param[in] defValue: The default value to return if the array is empty
     * @return the first item in the array, or the default value if the array is empty
     */
    const DataType& firstOrDefault(size_t index = 0, const DataType& defValue = DataType())
    {
        return size(index) ? (*this)(index)[0] : defValue;
    }

protected:
    /**
     * Make the handle point to the proper instance, referred by its index, in a vectorized context
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     */
    void adjustHandle(size_t index) const
    {
        if (m_arrayData.isValid())
        {
            size_t wantedIndex = m_offset + index;
            // Only move the handle when the requested instance differs from the cached one
            if (m_currentOffset != wantedIndex)
            {
                const_cast<array_t&>(m_arrayData).adjustHandle(wantedIndex - m_currentOffset);
                m_currentOffset = wantedIndex;
            }
        }
    }

protected:
    //! Role interpretation for the attribute this struct manages
    AttributeRole m_role{ AttributeRole::eNone };
    //! Helper for accessing array data (last as it uses the others in initializing)
    array_t m_arrayData;
    //! Offset (in terms of objects) to apply to the fabric pointer to access the current object
    size_t const& m_offset;
    //! Offset at which the array data is currently configured
    size_t mutable m_currentOffset{ 0 };

private:
    //select appropriate ABI function based on handle type
    static const DataType** gpuGet(ConstAttributeDataHandle const& hdl, GraphContextObj const* ctx)
    {
        const DataType** ptrToData{ nullptr };
        ctx->iAttributeData->getDataRGpuAt((const void**)&ptrToData, *ctx, &hdl, 1, GpuPtrType);
        return ptrToData;
    }
    static DataType** gpuGet(AttributeDataHandle const& hdl, GraphContextObj const* ctx)
    {
        DataType** ptrToData{ nullptr };
        ctx->iAttributeData->getDataWGpuAt((void**)&ptrToData, *ctx, &hdl, 1, GpuPtrType);
        return ptrToData;
    }
};

// ======================================================================
/**
 * Wrapper template that handles arrays of input attribute values.
 */
template <typename DataType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
using ArrayInput = ArrayAttribute<std::add_const_t<DataType>, kOgnInput, MemoryType, GpuPtrType>;

// ======================================================================
/**
 * Handle arrays of output attribute values.
 * It adds methods that provide write access to the underlying attribute data onto the functionality of ArrayInput.
 */
template <typename DataType, eAttributeType AttributeType, eMemoryType MemoryType = kCpu, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct ArrayWritableAttribute : public ArrayAttribute<DataType, AttributeType, MemoryType, GpuPtrType>
{
    // Make const-ness aware at compile time so that this class be used in const and non-const contexts
    static constexpr bool readOnly = (AttributeType == ogn::kOgnInput);
    using parent_t = ArrayAttribute<DataType, AttributeType, MemoryType, GpuPtrType>;
    using data_t = typename parent_t::data_t;
    using handle_t = typename parent_t::handle_t;
    using array_t = typename parent_t::array_t;
    using this_t = ArrayWritableAttribute<DataType, AttributeType, MemoryType, GpuPtrType>;

    /**
     * Set up the accessor for output attributes with array data
     *
     * @param[in] offset: A reference to the instance offset currently pointed by owning database
     * @param[in] role Attribute's role
     */
    ArrayWritableAttribute(size_t const& offset, AttributeRole role = AttributeRole::eNone) : parent_t(offset, role)
    {
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return Reference to an array wrapper around the raw fabric CPU data
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type>
    array_t& operator()(size_t index = 0)
    {
        //always go through fetch for array as it has special treatment in datamodel
        return cpu(index);
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return Reference to an array wrapper around the raw fabric GPU data
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCuda>::type>
    data_t** operator()(size_t index = 0)
    {
        // always go through fetch for array as it has special treatment in datamodel
        return gpu(index);
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return Reference to an array wrapper around the raw fabric CPU data
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCuda>::type>
    array_t& cpu(size_t index = 0)
    {
        // Delegate to the (const) parent accessor, then cast writability back on for this writable wrapper
        auto const* const_this = this;
        auto const& ret = const_this->parent_t::cpu(index);
        return const_cast<array_t&>(ret);
    }

    /**
     * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
     * @return Reference to an array wrapper around the raw fabric GPU data
     */
    template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCpu>::type>
    data_t** gpu(size_t index = 0)
    {
        // Delegate to the (const) parent accessor, then cast writability back on for this writable wrapper
        auto const* const_this = this;
        auto ret = const_this->parent_t::gpu(index);
        return const_cast<data_t**>(ret);
    }

    /**
     * Resize of the underlying array
     * @input newSize: the size to set for the underlying array
     * @input index: the index of the instance to query in a vectorized context
     */
    const void resize(size_t newSize, size_t index = 0)
    {
        auto& ctx = *this->context();
        ctx.iAttributeData->setElementCount(ctx, this->abi_handle(index), newSize);
        // Cached pointers are invalid after a resize; force a re-fetch on next access
        this->m_arrayData.setDirty();
    }

    /**
     * Copy some array data to another through the ABI
     * Will handle all the underlying optimizations (such as CoW or DataStealing)
     *
     * @return Reference to itself
     */
    this_t& operator=(const ArrayInput<DataType, MemoryType, GpuPtrType>& toBeCopied) { return shallowCopy(toBeCopied);}

    // @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database
    this_t& shallowCopy(const ArrayInput<DataType, MemoryType, GpuPtrType>& toBeCopied, size_t index = 0)
    {
        const IAttributeData& iData = *(this->context()->iAttributeData);
        iData.copyData(this->abi_handle(index), *this->context(), toBeCopied.abi_handle(index));
        // The copy may have replaced the underlying storage; force a re-fetch on next access
        this->m_arrayData.setDirty();
        return *this;
    }
};

// Convenience types for distinguishing output and state attributes
template <typename DataType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
using ArrayOutput = ArrayWritableAttribute<DataType, kOgnOutput, MemoryType, GpuPtrType>;
template <typename DataType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
using ArrayState = ArrayWritableAttribute<DataType, kOgnState, MemoryType, GpuPtrType>;

// Backward compatibility for previously existing data types
template <typename DataType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
using DualArrayInput = ArrayInput<DataType, kAny, GpuPtrType>;
template <typename DataType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
using DualArrayOutput = ArrayOutput<DataType, kAny, GpuPtrType>;

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/UsdTypes.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// This file contains helper utilities for managing casting and interpretation of the USD
// data types. Keeping this separate enables the ability for any other libraries to
// provide their own type casting, so that the OGN code can use native types only.

#include <omni/graph/core/PreUsdInclude.h>
#include <pxr/base/gf/half.h>
#include <pxr/base/gf/matrix2d.h>
#include <pxr/base/gf/matrix3d.h>
#include <pxr/base/gf/matrix4d.h>
#include <pxr/base/gf/vec2d.h>
#include <pxr/base/gf/vec2f.h>
#include <pxr/base/gf/vec2h.h>
#include <pxr/base/gf/vec2i.h>
#include <pxr/base/gf/vec3d.h>
#include <pxr/base/gf/vec3f.h>
#include <pxr/base/gf/vec3h.h>
#include <pxr/base/gf/vec3i.h>
#include <pxr/base/gf/vec4d.h>
#include <pxr/base/gf/vec4f.h>
#include <pxr/base/gf/vec4h.h>
#include <pxr/base/gf/vec4i.h>
#include <pxr/base/gf/quatd.h>
#include <pxr/base/gf/quatf.h>
#include <pxr/base/gf/quath.h>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/sdf/timeCode.h>
#include <pxr/base/tf/token.h>
#include <omni/graph/core/PostUsdInclude.h>

#include <omni/fabric/IPath.h>
#include <omni/fabric/IToken.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/ogn/TypeTraits.h>

namespace omni {
namespace graph {
namespace core {
namespace ogn {

// Each specialization of attribute_base_t maps a USD value type onto the OmniGraph
// BaseDataType of its scalar components.

// GfHalf, TfToken, SdfPath, and SdfTimeCode are the base types that allow special casting
template <> struct attribute_base_t<pxr::GfHalf> { static constexpr BaseDataType value = BaseDataType::eHalf; };
template <> struct attribute_base_t<pxr::TfToken> { static constexpr BaseDataType value = BaseDataType::eToken; };
template <> struct attribute_base_t<pxr::SdfPath> { static constexpr BaseDataType value = BaseDataType::eRelationship; };
template <> struct attribute_base_t<pxr::SdfTimeCode> { static constexpr BaseDataType value = BaseDataType::eDouble; };

// All of the matrix and vector types have USD implementations
template <> struct attribute_base_t<pxr::GfMatrix2d> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<pxr::GfMatrix3d> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<pxr::GfMatrix4d> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<pxr::GfVec2d> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<pxr::GfVec2f> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<pxr::GfVec2h> { static constexpr BaseDataType value = BaseDataType::eHalf; };
template <> struct attribute_base_t<pxr::GfVec2i> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<pxr::GfVec3d> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<pxr::GfVec3f> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<pxr::GfVec3h> { static constexpr BaseDataType value = BaseDataType::eHalf; };
template <> struct attribute_base_t<pxr::GfVec3i> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<pxr::GfVec4d> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<pxr::GfVec4f> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<pxr::GfVec4h> { static constexpr BaseDataType value = BaseDataType::eHalf; };
template <> struct attribute_base_t<pxr::GfVec4i> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<pxr::GfQuatd> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<pxr::GfQuatf> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<pxr::GfQuath> { static constexpr BaseDataType value = BaseDataType::eHalf; };

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni

// Simple type casting from the internal Fabric types to their equivalent USD types
// NOTE(review): these reinterpret_casts assume the Fabric TokenC/PathC handles are
// layout-compatible with pxr::TfToken/pxr::SdfPath — presumably guaranteed by the
// Fabric/USD integration; confirm against the IToken/IPath headers before changing.
inline const pxr::TfToken& asTfToken(const omni::fabric::TokenC& token)
{
    return reinterpret_cast<const pxr::TfToken&>(token);
}
inline pxr::TfToken& asTfToken(omni::fabric::TokenC& token)
{
    return reinterpret_cast<pxr::TfToken&>(token);
}
inline const pxr::SdfPath& asSdfPath(const omni::fabric::PathC& path)
{
    return reinterpret_cast<const pxr::SdfPath&>(path);
}
inline pxr::SdfPath& asSdfPath(omni::fabric::PathC& path)
{
    return reinterpret_cast<pxr::SdfPath&>(path);
}

namespace omni {
namespace graph {
namespace core {
namespace ogn {

// ==============================================================================================================
// Specializing the attribute_type_traits gives more options for casting data extracted from RuntimeAttributes.
// // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfHalf>(); template <> struct attribute_type_traits<pxr::GfHalf> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = true; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfHalf>::value; using actual_t = pxr::GfHalf; using element_t = pxr::GfHalf; using data_t = pxr::GfHalf; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfHalf[]>(); template <> struct attribute_type_traits<pxr::GfHalf[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = true; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfHalf>::value; using actual_t = pxr::GfHalf[]; using element_t = pxr::GfHalf; using data_t = pxr::GfHalf; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::TfToken>(); template <> struct attribute_type_traits<pxr::TfToken> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = true; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::TfToken>::value; using actual_t = pxr::TfToken; using element_t = pxr::TfToken; 
using data_t = pxr::TfToken; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::TfToken[]>(); template <> struct attribute_type_traits<pxr::TfToken[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = true; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::TfToken>::value; using actual_t = pxr::TfToken[]; using element_t = pxr::TfToken; using data_t = pxr::TfToken; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::SdfPath>(); template <> struct attribute_type_traits<pxr::SdfPath> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = true; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::SdfPath>::value; using actual_t = pxr::SdfPath; using element_t = pxr::SdfPath; using data_t = pxr::SdfPath; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::SdfPath[]>(); template <> struct attribute_type_traits<pxr::SdfPath[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = true; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = 
attribute_base_t<pxr::SdfPath>::value; using actual_t = pxr::SdfPath[]; using element_t = pxr::SdfPath; using data_t = pxr::SdfPath; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::SdfTimeCode>(); template <> struct attribute_type_traits<pxr::SdfTimeCode> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = true; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::SdfTimeCode>::value; using actual_t = pxr::SdfTimeCode; using element_t = pxr::SdfTimeCode; using data_t = pxr::SdfTimeCode; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::SdfTimeCode[]>(); template <> struct attribute_type_traits<pxr::SdfTimeCode[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = true; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::SdfTimeCode>::value; using actual_t = pxr::SdfTimeCode[]; using element_t = pxr::SdfTimeCode; using data_t = pxr::SdfTimeCode; static constexpr int tupleCount = 1; };// -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix2d>(); template <> struct attribute_type_traits<pxr::GfMatrix2d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; 
static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix2d>::value; using actual_t = pxr::GfMatrix2d; using element_t = double; using data_t = pxr::GfMatrix2d; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix2d[]>(); template <> struct attribute_type_traits<pxr::GfMatrix2d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix2d>::value; using actual_t = pxr::GfMatrix2d[]; using element_t = double; using data_t = pxr::GfMatrix2d; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix3d>(); template <> struct attribute_type_traits<pxr::GfMatrix3d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix3d>::value; using actual_t = pxr::GfMatrix3d; using element_t = double; using data_t = pxr::GfMatrix3d; static constexpr int tupleCount = 9; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix3d[]>(); template <> struct attribute_type_traits<pxr::GfMatrix3d[]> { static constexpr bool isArray = true; static constexpr 
int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix3d>::value; using actual_t = pxr::GfMatrix3d[]; using element_t = double; using data_t = pxr::GfMatrix3d; static constexpr int tupleCount = 9; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix4d>(); template <> struct attribute_type_traits<pxr::GfMatrix4d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix4d>::value; using actual_t = pxr::GfMatrix4d; using element_t = double; using data_t = pxr::GfMatrix4d; static constexpr int tupleCount = 16; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix4d[]>(); template <> struct attribute_type_traits<pxr::GfMatrix4d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix4d>::value; using actual_t = pxr::GfMatrix4d[]; using element_t = double; using data_t = pxr::GfMatrix4d; static constexpr int tupleCount = 16; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2d>(); template <> 
struct attribute_type_traits<pxr::GfVec2d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2d>::value; using actual_t = pxr::GfVec2d; using element_t = double; using data_t = pxr::GfVec2d; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2d[]>(); template <> struct attribute_type_traits<pxr::GfVec2d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2d>::value; using actual_t = pxr::GfVec2d[]; using element_t = double; using data_t = pxr::GfVec2d; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2f>(); template <> struct attribute_type_traits<pxr::GfVec2f> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2f>::value; using actual_t = pxr::GfVec2f; using element_t = float; using data_t = pxr::GfVec2f; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto 
value = runtimeAttribute.get<pxr::GfVec2f[]>(); template <> struct attribute_type_traits<pxr::GfVec2f[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2f>::value; using actual_t = pxr::GfVec2f[]; using element_t = float; using data_t = pxr::GfVec2f; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2h>(); template <> struct attribute_type_traits<pxr::GfVec2h> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2h>::value; using actual_t = pxr::GfVec2h; using element_t = pxr::GfHalf; using data_t = pxr::GfVec2h; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2h[]>(); template <> struct attribute_type_traits<pxr::GfVec2h[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2h>::value; using actual_t = pxr::GfVec2h[]; using element_t = pxr::GfHalf; using data_t = pxr::GfVec2h; static constexpr int tupleCount = 2; }; // 
-------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2i>(); template <> struct attribute_type_traits<pxr::GfVec2i> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2i>::value; using actual_t = pxr::GfVec2i; using element_t = int; using data_t = pxr::GfVec2i; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2i[]>(); template <> struct attribute_type_traits<pxr::GfVec2i[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2i>::value; using actual_t = pxr::GfVec2i[]; using element_t = int; using data_t = pxr::GfVec2i; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3d>(); template <> struct attribute_type_traits<pxr::GfVec3d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3d>::value; using actual_t = pxr::GfVec3d; using element_t = double; using data_t = 
pxr::GfVec3d; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3d[]>(); template <> struct attribute_type_traits<pxr::GfVec3d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3d>::value; using actual_t = pxr::GfVec3d[]; using element_t = double; using data_t = pxr::GfVec3d; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3f>(); template <> struct attribute_type_traits<pxr::GfVec3f> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3f>::value; using actual_t = pxr::GfVec3f; using element_t = float; using data_t = pxr::GfVec3f; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3f[]>(); template <> struct attribute_type_traits<pxr::GfVec3f[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3f>::value; using 
actual_t = pxr::GfVec3f[]; using element_t = float; using data_t = pxr::GfVec3f; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3h>(); template <> struct attribute_type_traits<pxr::GfVec3h> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3h>::value; using actual_t = pxr::GfVec3h; using element_t = pxr::GfHalf; using data_t = pxr::GfVec3h; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3h[]>(); template <> struct attribute_type_traits<pxr::GfVec3h[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3h>::value; using actual_t = pxr::GfVec3h[]; using element_t = pxr::GfHalf; using data_t = pxr::GfVec3h; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3i>(); template <> struct attribute_type_traits<pxr::GfVec3i> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr 
BaseDataType baseType = attribute_base_t<pxr::GfVec3i>::value; using actual_t = pxr::GfVec3i; using element_t = int; using data_t = pxr::GfVec3i; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3i[]>(); template <> struct attribute_type_traits<pxr::GfVec3i[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3i>::value; using actual_t = pxr::GfVec3i[]; using element_t = int; using data_t = pxr::GfVec3i; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4d>(); template <> struct attribute_type_traits<pxr::GfVec4d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4d>::value; using actual_t = pxr::GfVec4d; using element_t = double; using data_t = pxr::GfVec4d; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4d[]>(); template <> struct attribute_type_traits<pxr::GfVec4d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static 
constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4d>::value; using actual_t = pxr::GfVec4d[]; using element_t = double; using data_t = pxr::GfVec4d; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4f>(); template <> struct attribute_type_traits<pxr::GfVec4f> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4f>::value; using actual_t = pxr::GfVec4f; using element_t = float; using data_t = pxr::GfVec4f; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4f[]>(); template <> struct attribute_type_traits<pxr::GfVec4f[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4f>::value; using actual_t = pxr::GfVec4f[]; using element_t = float; using data_t = pxr::GfVec4f; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4h>(); template <> struct attribute_type_traits<pxr::GfVec4h> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = 
true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4h>::value; using actual_t = pxr::GfVec4h; using element_t = pxr::GfHalf; using data_t = pxr::GfVec4h; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4h[]>(); template <> struct attribute_type_traits<pxr::GfVec4h[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4h>::value; using actual_t = pxr::GfVec4h[]; using element_t = pxr::GfHalf; using data_t = pxr::GfVec4h; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4i>(); template <> struct attribute_type_traits<pxr::GfVec4i> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4i>::value; using actual_t = pxr::GfVec4i; using element_t = int; using data_t = pxr::GfVec4i; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4i[]>(); template <> struct attribute_type_traits<pxr::GfVec4i[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static 
constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4i>::value; using actual_t = pxr::GfVec4i[]; using element_t = int; using data_t = pxr::GfVec4i; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuatd>(); template <> struct attribute_type_traits<pxr::GfQuatd> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuatd>::value; using actual_t = pxr::GfQuatd; using element_t = double; using data_t = pxr::GfQuatd; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuatd[]>(); template <> struct attribute_type_traits<pxr::GfQuatd[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuatd>::value; using actual_t = pxr::GfQuatd[]; using element_t = double; using data_t = pxr::GfQuatd; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuatf>(); template <> struct attribute_type_traits<pxr::GfQuatf> { static constexpr bool 
isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuatf>::value; using actual_t = pxr::GfQuatf; using element_t = float; using data_t = pxr::GfQuatf; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuatf[]>(); template <> struct attribute_type_traits<pxr::GfQuatf[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuatf>::value; using actual_t = pxr::GfQuatf[]; using element_t = float; using data_t = pxr::GfQuatf; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuath>(); template <> struct attribute_type_traits<pxr::GfQuath> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuath>::value; using actual_t = pxr::GfQuath; using element_t = pxr::GfHalf; using data_t = pxr::GfQuath; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuath[]>(); template <> 
struct attribute_type_traits<pxr::GfQuath[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuath>::value; using actual_t = pxr::GfQuath[]; using element_t = pxr::GfHalf; using data_t = pxr::GfQuath; static constexpr int tupleCount = 4; }; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/OmniGraphNodeABI.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/ComputeGraph.h> #include <omni/graph/core/IGraphRegistry.h> #include <omni/graph/core/TemplateUtils.h> #include <omni/graph/core/ogn/Database.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/unstable/INodeTypeForwarding.h> #include <carb/profiler/Profile.h> #include <carb/Framework.h> #include <carb/logging/Log.h> // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // OmniGraphNode_ABI Templated base class for generated OmniGraph node type definitions // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= // Uncomment the first line to see debug output, the second to see nothing // #define OGN_DBG #define OGN_DBG if (false) #include <iostream> namespace omni { namespace graph { namespace core { namespace ogn { // ====================================================================== // Function declarations for registering and deregistering node types. Used so that a node type // can either use the static functions available within the core or the ABI functions outside of it. 
// Function-pointer signatures used to register/deregister node types, so that a node type
// can either use the static functions available within the core or the ABI functions outside of it.
using OmniGraphNodeRegisterFn = void (*)(const omni::graph::core::INodeType&, int);
using OmniGraphNodeDeregisterFn = void (*)(const char*);
using OmniGraphNodeRegisterAliasFn = void (*)(const omni::graph::core::INodeType&, const char*);

// ======================================================================
// The has_XXX templated types are a set of metaprograms that use type dispatching in conjunction with the
// is_detected template (see TemplateUtils.h) to resolve to std::true_type when a class contains a certain
// function and std::false_type when it does not. This allows compile-time choice between two overloaded
// versions of each of the ABI functions from the call_XXX() version of them.
//
// Each alias spells out the expression "call method X on NodeTypeClass with these argument types"; when
// NodeTypeClass has no matching callable method the alias is ill-formed and is_detected<has_X, NodeTypeClass>
// resolves to std::false_type.
//
// NOTE(review): the std::is_same<R, ...>::value_type wrapper does not constrain the method's return type.
// std::is_same derives from std::integral_constant, whose value_type member is always bool, and the alias is
// well-formed for ANY return type as long as the call expression itself is valid. The R in each comparison
// therefore only documents the return type the ABI expects; detection is purely about callability.
//
// The functions detected are the ones that are used as part of the interface to the OmniGraph Node C ABI.
// The formatting is consistent to make it easier to recognize and add new types:
//    Line 1.  The template declaration
//    Line 2.  The typename declaration, with the expected return type of the function
//    Line 3.  The function name declaration
//    Lines 4+. Declarations for each of the function parameters
// clang-format off

// ----------------------------------------------------------------------------------------------------
// static void addInput(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)
template <class NodeTypeClass>
using has_addInput = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addInput(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<const void*>(),
        std::declval<const size_t*>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void addOutput(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)
template <class NodeTypeClass>
using has_addOutput = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addOutput(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<const void*>(),
        std::declval<const size_t*>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void addState(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)
template <class NodeTypeClass>
using has_addState = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addState(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<const void*>(),
        std::declval<const size_t*>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void addExtendedInput(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)
template <class NodeTypeClass>
using has_addExtendedInput = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addExtendedInput(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<ExtendedAttributeType>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void addExtendedOutput(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)
template <class NodeTypeClass>
using has_addExtendedOutput = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addExtendedOutput(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<ExtendedAttributeType>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void addExtendedState(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)
template <class NodeTypeClass>
using has_addExtendedState = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addExtendedState(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<ExtendedAttributeType>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static bool hasState(const NodeTypeObj&)
template <class NodeTypeClass>
using has_hasState = typename std::is_same<bool,
    decltype(std::declval<const NodeTypeClass&>().hasState(
        std::declval<const NodeTypeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void setHasState(const NodeTypeObj&, bool)
template <class NodeTypeClass>
using has_setHasState = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().setHasState(
        std::declval<const NodeTypeObj&>(),
        std::declval<bool>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static bool compute(const GraphContextObj&, const NodeObj&)
template <class NodeTypeClass>
using has_computeABI = typename std::is_same<bool,
    decltype(std::declval<const NodeTypeClass&>().compute(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static bool compute(NodeTypeDataClass&)
// Note the non-const declval: the OGN-style compute is detected on a mutable NodeTypeClass.
template <class NodeTypeClass, class NodeTypeDataClass>
using has_computeOGNT = typename std::is_same<bool,
    decltype(std::declval<NodeTypeClass&>().compute(
        std::declval<NodeTypeDataClass&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static size_t computeVectorized(const GraphContextObj&, const NodeObj&, size_t)
template <class NodeTypeClass>
using has_computeVectorizedABI = typename std::is_same<size_t,
    decltype(std::declval<const NodeTypeClass&>().computeVectorized(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>(),
        std::declval<size_t>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static size_t computeVectorized(NodeTypeDataClass&, size_t)
template <class NodeTypeClass, class NodeTypeDataClass>
using has_computeVectorizedOGNT = typename std::is_same<size_t,
    decltype(std::declval<NodeTypeClass&>().computeVectorized(
        std::declval<NodeTypeDataClass&>(),
        std::declval<size_t>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static const char* getNodeType()
template <class NodeTypeClass>
using has_getNodeType = typename std::is_same<const char*,
    decltype(std::declval<const NodeTypeClass&>().getNodeType(
        ))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static const char* getTypeName(const NodeTypeObj&)
template <class NodeTypeClass>
using has_getTypeName = typename std::is_same<const char*,
    decltype(std::declval<const NodeTypeClass&>().getTypeName(
        std::declval<const NodeTypeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static getScheduleNodeCount(const GraphContextObj&, const NodeObj&, const ScheduleNodeObj*, size_t)
// (deprecated scheduling hook; any return type is detected -- see NOTE above)
template <class NodeTypeClass>
using has_getScheduleNodeCount = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().getScheduleNodeCount(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>(),
        std::declval<const ScheduleNodeObj*>(),
        std::declval<size_t>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void getScheduleNodes(const GraphContextObj&, const NodeObj&, const ScheduleNodeObj*, size_t,
//                              ScheduleNodeObj*, size_t)
// (deprecated scheduling hook)
template <class NodeTypeClass>
using has_getScheduleNodes = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().getScheduleNodes(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>(),
        std::declval<const ScheduleNodeObj*>(),
        std::declval<size_t>(),
        std::declval<ScheduleNodeObj*>(),
        std::declval<size_t>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void initialize(const GraphContextObj&, const NodeObj&)
template <class NodeTypeClass>
using has_initialize = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().initialize(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void initializeType(const NodeTypeObj&)
template <class NodeTypeClass>
using has_initializeType = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().initializeType(
        std::declval<const NodeTypeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void registerTasks()
template <class NodeTypeClass>
using has_registerTasks = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().registerTasks(
        ))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void release(const NodeObj&)
template <class NodeTypeClass>
using has_release = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().release(
        std::declval<const NodeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static bool updateNodeVersion(const GraphContextObj&, const NodeObj&, int, int)
template <class NodeTypeClass>
using has_updateNodeVersion = typename std::is_same<bool,
    decltype(std::declval<const NodeTypeClass&>().updateNodeVersion(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>(),
        std::declval<int>(),
        std::declval<int>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static size_t getAllMetadata(const NodeTypeObj&, const char**, const char**, size_t)
template <class NodeTypeClass>
using has_getAllMetadata = typename std::is_same<size_t,
    decltype(std::declval<const NodeTypeClass&>().getAllMetadata(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char**>(),
        std::declval<const char**>(),
        std::declval<size_t>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static const char* getMetadata(const NodeTypeObj&, const char*)
template <class NodeTypeClass>
using has_getMetadata = typename std::is_same<const char*,
    decltype(std::declval<const NodeTypeClass&>().getMetadata(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static size_t getMetadataCount(const NodeTypeObj&)
template <class NodeTypeClass>
using has_getMetadataCount = typename std::is_same<size_t,
    decltype(std::declval<const NodeTypeClass&>().getMetadataCount(
        std::declval<const NodeTypeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void setMetadata(const NodeTypeObj&, const char*, const char*)
template <class NodeTypeClass>
using has_setMetadata = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().setMetadata(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void addSubNodeType(const NodeTypeObj&, const char*, const NodeTypeObj&)
template <class NodeTypeClass>
using has_addSubNodeType = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addSubNodeType(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const NodeTypeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static NodeTypeObj getSubNodeType(const NodeTypeObj&, const char*)
template <class NodeTypeClass>
using has_getSubNodeType = typename std::is_same<NodeTypeObj,
    decltype(std::declval<const NodeTypeClass&>().getSubNodeType(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static NodeTypeObj createNodeType(const char*, int)
template <class NodeTypeClass>
using has_createNodeType = typename std::is_same<NodeTypeObj,
    decltype(std::declval<const NodeTypeClass&>().createNodeType(
        std::declval<const char*>(),
        std::declval<int>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static void onConnectionTypeResolve(const NodeObj&)
// NOTE(review): the original comment said NodeTypeObj, but the detected parameter is NodeObj.
template <class NodeTypeClass>
using has_onConnectionTypeResolve = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().onConnectionTypeResolve(
        std::declval<const NodeObj&>()))>::value_type;

// ----------------------------------------------------------------------------------------------------
// static bool inspect(const NodeTypeObj&, inspect::IInspector*)
// NOTE(review): the original comment said void, but the expected return type here is bool.
template <class NodeTypeClass>
using has_inspect = typename std::is_same<bool,
    decltype(std::declval<const NodeTypeClass&>().inspect(
        std::declval<const NodeTypeObj&>(),
        std::declval<inspect::IInspector*>()))>::value_type;
// clang-format on

// ==============================================================================================================
/**
 * @brief Common base class for all node type implementation definitions, so that they can be in a common container
 */
class NodeTypeABI
{
protected:
    // Remembering these values allows the node type to be registered and deregistered at will
    const char* m_nodeTypeName{ nullptr }; //!< Unique name of the node type
    int m_version{ 1 }; //!< Current version of the node type
    const char* m_extensionName{ nullptr }; //!< Extension to which this node type belongs

    /**
     * @brief Construct a new NodeTypeABI object
     *
     * @param nodeTypeName Unique name of the node type
     * @param version Version number of the node type
     * @param extensionName Extension to which this node type belongs
     */
    NodeTypeABI(const char* nodeTypeName, int version, const char* extensionName)
        : m_nodeTypeName{ nodeTypeName }
        , m_version(version)
        , m_extensionName(extensionName)
    {
    }

public:
    // --------------------------------------------------------------------------------------------------------------
    /**
     * @brief Populate an INodeType interface with the functions that implement this particular templated node type
     *
     * @param[out] nodeTypeInterface Interface to be populated
     */
    virtual void populateNodeTypeInterface(INodeType& nodeTypeInterface) const = 0;

    // --------------------------------------------------------------------------------------------------------------
    /**
     * @brief Register the node type encapsulated in this description.
     *
     * Builds the INodeType function table via populateNodeTypeInterface() and hands it to the registry.
     * Also sets up node-type-name forwarding when the node overrides getNodeType() with a name that
     * differs from the one generated from the .ogn file, and warns once if the node still implements
     * the deprecated scheduling hooks.
     */
    void registerNodeType(IGraphRegistry& iGraphRegistry)
    {
        INodeType iNodeType{};
        populateNodeTypeInterface(iNodeType);
        OGN_DBG std::cout << "DBG: ABI Registration of " << m_nodeTypeName << ", version " << m_version << " from "
                          << m_extensionName << std::endl;
        iGraphRegistry.registerNodeTypeInterface(iNodeType, m_version, sizeof(INodeType));

        // One potential source of node type forwarding is if the node has overridden the getNodeType() method and
        // supplied a different name than the one generated through the .ogn file. Add that one here.
        if (iNodeType.getNodeType)
        {
            const char* overriddenTypeName = iNodeType.getNodeType();
            if (strcmp(overriddenTypeName, m_nodeTypeName) != 0)
            {
                auto iNodeTypeForwarding =
                    carb::getCachedInterface<ComputeGraph>()->getNodeTypeForwardingInterfacePtr();
                if (iNodeTypeForwarding)
                {
                    // Forward the .ogn-generated name to the overridden name at the same version
                    iNodeTypeForwarding->defineForward(
                        m_nodeTypeName, m_version, overriddenTypeName, m_version, m_extensionName);
                }
            }
        }
        if (iNodeType.getScheduleNodeCount || iNodeType.getScheduleNodes)
        {
            CARB_LOG_WARN_ONCE(
                "%s: getScheduleNodeCount() and getScheduleNodes() are deprecated, please remove", m_nodeTypeName);
        }
    }

    // --------------------------------------------------------------------------------------------------------------
    /**
     * @brief Deregister the node type encapsulated in this description.
     */
    void deregisterNodeType(IGraphRegistry& iGraphRegistry)
    {
        iGraphRegistry.unregisterNodeType(m_nodeTypeName);
    }
};

// ======================================================================
/**
 * @brief ABI proxy class for OGN generated nodes. It provides implementations for all of the
 * INodeType ABI functions which will call the actual node's versions of those functions
 * if the node defines them.
* * This class uses a technique called "tag dispatching", which is a compile-time switch that decides * which version of a method will be called. By defining overloaded methods taking either the * `std::true_type` or `std::false_type` type as the first parameter, the version that is called can * be decided at compile time by using a template that instantiates one of the two. * * In this template class the methods "X()" use tag dispatching to decide which two versions of the `call_X()` * method to call. The version accepting `std::true_type` is called when an override of `X()` is detected and calls * the override directly. The version accepting std::false_type performs the default version of `X()`. * * Each of the INode ABI functions is implemented with these three functions. Here is an annotated example * of how this works for a fictional ABI method X that takes a single int argument: * * @code{.cpp} * // By making this a template it can change types at compile-time * template <class NodeTypeClass> * // std::is_same will check to see if the named type matches the return type of the declared function * using has_X = typename std::is_same<void, * // decltype extracts the type of the declared entity * // declval gives a compile-time stand-in for the declared entity * // This statement says "get the type of the value returned by a NodeTypeClass function named X" * decltype(std::declval<const NodeTypeClass&>().X( * // This line adds the requirement that the function X takes an integer argument * std::declval<int>()) * // The value_type trait gets the return type of the is_same template (std::true_type/std::false_type) * )>::value_type; * // These are two overloads of the same method * // Since the last parameters are incompatible only one can be chosen * void call_X(int value, std::true_type) {} * void call_X(int value, std::false_type) {} * // This is the public method called from outside the class * void X(int value) { * // If NodeTypeClass::X exists then 
is_detected<> selects the std::true_type variation * // for the first argument. Subsequent arguments are passed through from this method's arguments. * call_X(is_detected<has_X, NodeTypeClass>(), value); * } * @endcode * * For a full description of the SFINAE technique in general and the tag dispatching implementation see * https://www.bfilipek.com/2016/02/notes-on-c-sfinae.html#tag-dispatching * * The net result of this metaprogramming is that OmniGraphNode_ABI<MyNode, MyNodeDatabase> instantiates * an ABI-compatible class that calls overrides in MyNode where available and the default where not. This * is functionally equivalent to acquiring and extracting the Node ABI interface and calling the methods * on it. MyNodeDatabase is automatically generated by the .ogn processor and MyNode is the class the node * writer provides, giving them full control over the ABI implementation if they wish, and use of the * default implementation and generated helper classes if not. * * @tparam NodeTypeClass Class the user has defined for implementing the custom parts of the node type interface * @tparam NodeTypeDataClass Generated database class for @p NodeTypeClass */ template <typename NodeTypeClass, typename NodeTypeDataClass> class OmniGraphNode_ABI : public NodeTypeABI { static void call_addInput(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Override(addInput " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addInput(nodeType, name, typeName, required, defaultValuePtr, defaultElemCountPtr); } static void call_addInput(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Default(addInput " << name << ", " << typeName << ")" << std::endl; return; } // 
---------------------------------------------------------------------- static void call_addOutput(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Override(addOutput " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addOutput(nodeType, name, typeName, required, defaultValuePtr, defaultElemCountPtr); } static void call_addOutput(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Default(addOutput " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static void call_addState(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Override(addState " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addState(nodeType, name, typeName, required, defaultValuePtr, defaultElemCountPtr); } static void call_addState(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Default(addState " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static void call_addExtendedInput(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Override(addExtendedInput " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addExtendedInput(nodeType, name, typeName, required, 
extendedType); } static void call_addExtendedInput(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Default(addExtendedInput " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static void call_addExtendedOutput(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Override(addExtendedOutput " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addExtendedOutput(nodeType, name, typeName, required, extendedType); } static void call_addExtendedOutput(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType) noexcept { OGN_DBG std::cout << "DBG: Default(addExtendedOutput " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static void call_addExtendedState(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Override(addExtendedState " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addExtendedState(nodeType, name, typeName, required, extendedType); } static void call_addExtendedState(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Default(addExtendedState " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static bool call_hasState(std::true_type, const NodeTypeObj& nodeType) noexcept { OGN_DBG std::cout << "DBG: Override(hasState)" << std::endl; 
return NodeTypeClass::hasState(nodeType); } static bool call_hasState(std::false_type, const NodeTypeObj&) noexcept { OGN_DBG std::cout << "DBG: Default(hasState)" << std::endl; return false; } // ---------------------------------------------------------------------- static void call_setHasState(std::true_type, const NodeTypeObj& nodeType, bool hasState) noexcept { OGN_DBG std::cout << "DBG: Override(setHasState)" << std::endl; NodeTypeClass::setHasState(nodeType, hasState); } static void call_setHasState(std::false_type, const NodeTypeObj&, bool) noexcept { OGN_DBG std::cout << "DBG: Default(setHasState)" << std::endl; } // ---------------------------------------------------------------------- template<typename FUNC> static bool call_computeCommonOGN(const GraphContextObj& context, const NodeObj& node, FUNC const& compute, size_t batchCount) { auto create = [](GraphContextObj const* contexts, NodeObj const* nodes, size_t count) -> ogn::OmniGraphDatabase* { return new NodeTypeDataClass(contexts, nodes, count); }; NodeTypeDataClass* nodeDataPtr = reinterpret_cast<NodeTypeDataClass*>(node.iNode->getOgnDatabase(node, create)); if (nodeDataPtr == nullptr || nodeDataPtr->validate() == false) return false; //warn the node we're about to compute node.iNode->increaseComputeCount(node, batchCount); // Call compute nodeDataPtr->preCompute(); bool result = compute(*nodeDataPtr); return result; } // ---------------------------------------------------------------------- template <typename T> using has_computeOGN = has_computeOGNT<T, NodeTypeDataClass>; template <typename T> using has_computeVectorizedOGN = has_computeVectorizedOGNT<T, NodeTypeDataClass>; using regOGN = is_detected<has_computeOGN, NodeTypeClass>; using regABI = is_detected<has_computeABI, NodeTypeClass>; using vecOGN = is_detected<has_computeVectorizedOGN, NodeTypeClass>; using vecABI = is_detected<has_computeVectorizedABI, NodeTypeClass>; static_assert((regOGN() || regABI()) ^ (vecOGN() || vecABI()), 
"Implements 'compute' xor 'computeVectorized' (ie. not both)"); //=============== template <typename UNSED1, typename UNSED2, typename UNSED3> //regOGN, regABI, vecOGN, vecABI static bool call_compute(std::true_type, UNSED1, UNSED2, UNSED3, const GraphContextObj& context, const NodeObj& node) noexcept { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[Compute][OGN] %s", node.iNode->getPrimPath(node)); return call_computeCommonOGN(context, node, NodeTypeClass::compute, 1); } //=============== template <typename UNSED1, typename UNSED2> // regOGN, regABI, vecOGN, vecABI static bool call_compute( std::false_type, std::true_type, UNSED1, UNSED2, const GraphContextObj& context, const NodeObj& node) noexcept { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[Compute][ABI] %s", node.iNode->getPrimPath(node)); node.iNode->increaseComputeCount(node, 1); return NodeTypeClass::compute(context, node); } //=============== template <typename UNSED1> // regOGN, regABI, vecOGN, vecABI static bool call_compute(std::false_type,std::false_type, std::true_type, UNSED1, const GraphContextObj& context, const NodeObj& node) noexcept { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][OGN][x1] %s", node.iNode->getPrimPath(node)); auto adapter = [](NodeTypeDataClass& db) { return NodeTypeClass::computeVectorized(db, 1) != 0; }; return call_computeCommonOGN(context, node, adapter, 1); } //=============== // regOGN, regABI, vecOGN, vecABI static bool call_compute(std::false_type,std::false_type, std::false_type, std::true_type, const GraphContextObj& context, const NodeObj& node) noexcept { CARB_PROFILE_ZONE( carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][ABI][x1] %s", node.iNode->getPrimPath(node)); node.iNode->increaseComputeCount(node, 1); return NodeTypeClass::computeVectorized(context, node, 1) != 0; } // ---------------------------------------------------------------------- //=============== template <typename UNSED1, typename 
UNSED2, typename UNSED3> //regOGN, regABI, vecOGN, vecABI static size_t call_computeVectorized( UNSED1, UNSED2, std::true_type, UNSED3, const GraphContextObj& context, const NodeObj& node, size_t count) noexcept { // user has implemented the OGN version size_t ret = 0; auto adapter = [count, &ret, &node](NodeTypeDataClass& db) { db.resetToFirstInstance(); if (db.canVectorize()) // auto conversion for instance might prevent vectorization { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][OGN][x%d] %s", (int)count, node.iNode->getPrimPath(node)); ret = NodeTypeClass::computeVectorized(db, count); return true; } CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][OGN][1 by 1][x%d] %s", (int)count, node.iNode->getPrimPath(node)); auto remaining = count; db.resetToFirstInstance(); while (remaining--) { if (NodeTypeClass::computeVectorized(db, 1) != 0) ++ret; db.moveToNextInstance(); } db.resetToFirstInstance(); return true; }; call_computeCommonOGN(context, node, adapter, count); return ret; } //=============== template <typename UNSED1, typename UNSED2> //regOGN, regABI, vecOGN, vecABI static size_t call_computeVectorized( UNSED1, UNSED2, std::false_type, std::true_type, const GraphContextObj& context, const NodeObj& node, size_t count) noexcept { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][ABI][x%d] %s", (int)count, node.iNode->getPrimPath(node)); // user has implemented the ABI version node.iNode->increaseComputeCount(node, count); return NodeTypeClass::computeVectorized(context, node, count); } //=============== template <typename UNSED1> //regOGN, regABI, vecOGN, vecABI static size_t call_computeVectorized( std::true_type, UNSED1, std::false_type, std::false_type, const GraphContextObj& context, const NodeObj& node, size_t count) noexcept { // user has implemented the OGN version size_t ret = 0; auto adapter = [count, &ret, &node](NodeTypeDataClass& db) { 
CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[Compute][OGN][1 by 1][x%d] %s", (int)count, node.iNode->getPrimPath(node)); auto remaining = count; db.resetToFirstInstance(); while (remaining--) { if (NodeTypeClass::compute(db)) ret++; db.moveToNextInstance(); } db.resetToFirstInstance(); return true; }; call_computeCommonOGN(context, node, adapter, count); return ret; } //=============== //regOGN, regABI, vecOGN, vecABI static size_t call_computeVectorized(std::false_type, std::true_type, std::false_type, std::false_type, const GraphContextObj& context, const NodeObj& node, size_t count) noexcept { //unreachable CARB_LOG_FATAL("reaching abnormal code path in OmniGraphNodeABI"); return 0; } // ---------------------------------------------------------------------- static const char* call_getNodeType(std::true_type) noexcept { OGN_DBG std::cout << "DBG: Override(getNodeType)" << std::endl; return NodeTypeClass::getNodeType(); } static const char* call_getNodeType(std::false_type) noexcept { // The node type name must always be available, usually as the one the .ogn file specified OGN_DBG std::cout << "DBG: Default(getNodeType)" << std::endl; return sm_nodeTypeName; } // ---------------------------------------------------------------------- static const char* call_getTypeName(std::true_type, const NodeTypeObj& nodeType) noexcept { OGN_DBG std::cout << "DBG: Override(getTypeName)" << std::endl; return NodeTypeClass::getTypeName(nodeType); } static const char* call_getTypeName(std::false_type, const NodeTypeObj& nodeType) noexcept { OGN_DBG std::cout << "DBG: Default(getTypeName)" << std::endl; return sm_nodeTypeName; } // ---------------------------------------------------------------------- static size_t call_getScheduleNodeCount(std::true_type, const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize) noexcept { OGN_DBG std::cout << "DBG: Override(getScheduleNodeCount)" << std::endl; 
return NodeTypeClass::getScheduleNodeCount(context, node, upstreamScheduleNodesBuf, upstreamBufferSize); } static size_t call_getScheduleNodeCount(std::false_type, const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize) noexcept { OGN_DBG std::cout << "DBG: Default(getScheduleNodeCount)" << std::endl; return 0; } // ---------------------------------------------------------------------- static void call_getScheduleNodes(std::true_type, const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize, ScheduleNodeObj* scheduleNodesBuf, size_t bufferSize) noexcept { OGN_DBG std::cout << "DBG: Override(getScheduleNodes)" << std::endl; NodeTypeClass::getScheduleNodes( context, node, upstreamScheduleNodesBuf, upstreamBufferSize, scheduleNodesBuf, bufferSize); } static void call_getScheduleNodes(std::false_type, const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize, ScheduleNodeObj* scheduleNodesBuf, size_t bufferSize) noexcept { OGN_DBG std::cout << "DBG: Default(getScheduleNodes)" << std::endl; return; } // ---------------------------------------------------------------------- // The generated database class may have overrides for the initialize() method if attribute metadata is present // so this function has two layers of calls. 
    // ----------------------------------------------------------------------
    // Tag-dispatched trampolines: each ABI entry point has a std::true_type
    // overload (forwards to the node class' own implementation) and a
    // std::false_type overload (supplies the default behavior). The tag is
    // produced at compile time by is_detected<has_xxx, ...>, so exactly one of
    // the two overloads is instantiated/called per node type.
    // ----------------------------------------------------------------------
    // Database-level initialize: chosen when the generated database class
    // (NodeTypeDataClass) declares initialize(), e.g. when attribute metadata
    // is present (see the note above this section in the original header).
    static void call_database_initialize(std::true_type, const GraphContextObj& context, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Database Override(initialize)" << std::endl;
        NodeTypeDataClass::initialize(context, node);
    }
    // Default: database class has no initialize(); nothing to do.
    static void call_database_initialize(std::false_type, const GraphContextObj& context, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(initialize)" << std::endl;
        return;
    }
    // Node-level initialize: the database initialize always runs first, then
    // the node class' own initialize() when one exists.
    static void call_initialize(std::true_type, const GraphContextObj& context, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(initialize)" << std::endl;
        call_database_initialize(is_detected<has_initialize, NodeTypeDataClass>(), context, node);
        NodeTypeClass::initialize(context, node);
    }
    static void call_initialize(std::false_type, const GraphContextObj& context, const NodeObj& node) noexcept
    {
        call_database_initialize(is_detected<has_initialize, NodeTypeDataClass>(), context, node);
        return;
    }
    // ----------------------------------------------------------------------
    // initializeType: both variants flag the node type as stateful when the
    // node class carries internal data members (non-empty class), then let the
    // database class register its attributes. Only the override variant also
    // calls the node class' own initializeType().
    static void call_initializeType(std::true_type, const NodeTypeObj& nodeType) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(initializeType)" << std::endl;
        // Rely on the database to handle the case of state attributes, this is just for internal state data
        if (!std::is_empty<NodeTypeClass>::value)
        {
            nodeType.iNodeType->setHasState(nodeType, true);
        }
        NodeTypeDataClass::initializeType(nodeType);
        NodeTypeClass::initializeType(nodeType);
    }
    static void call_initializeType(std::false_type, const NodeTypeObj& nodeType) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(initializeType)" << std::endl;
        // Rely on the database to handle the case of state attributes, this is just for internal state data
        if (!std::is_empty<NodeTypeClass>::value)
        {
            nodeType.iNodeType->setHasState(nodeType, true);
        }
        NodeTypeDataClass::initializeType(nodeType);
    }
    // ----------------------------------------------------------------------
    // registerTasks: override forwards, default is a no-op.
    static void call_registerTasks(std::true_type) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(registerTasks)" << std::endl;
        NodeTypeClass::registerTasks();
    }
    static void call_registerTasks(std::false_type) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(registerTasks)" << std::endl;
        return;
    }
    // ----------------------------------------------------------------------
    // release: override forwards, default is a no-op.
    static void call_release(std::true_type, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(release)" << std::endl;
        NodeTypeClass::release(node);
    }
    static void call_release(std::false_type, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(release)" << std::endl;
        return;
    }
    // ----------------------------------------------------------------------
    // updateNodeVersion: default reports success (true) without doing anything.
    static bool call_updateNodeVersion(
        std::true_type, const GraphContextObj& context, const NodeObj& node, int oldVersion, int newVersion) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(updateNodeVersion)" << std::endl;
        return NodeTypeClass::updateNodeVersion(context, node, oldVersion, newVersion);
    }
    static bool call_updateNodeVersion(
        std::false_type, const GraphContextObj& context, const NodeObj& node, int oldVersion, int newVersion) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(updateNodeVersion)" << std::endl;
        return true;
    }
    // ----------------------------------------------------------------------
    // getAllMetadata: default reports zero metadata entries.
    static size_t call_getAllMetadata(
        std::true_type, const NodeTypeObj& nodeType, const char** keyBuf, const char** valueBuf, size_t bufSize) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(getAllMetadata)" << std::endl;
        return NodeTypeClass::getAllMetadata(nodeType, keyBuf, valueBuf, bufSize);
    }
    static size_t call_getAllMetadata(
        std::false_type, const NodeTypeObj& nodeType, const char** keyBuf, const char** valueBuf, size_t bufSize) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(getAllMetadata)" << std::endl;
        return 0;
    }
    // ----------------------------------------------------------------------
    // getMetadata: default reports "no such key" via nullptr.
    static const char* call_getMetadata(std::true_type, const NodeTypeObj& nodeType, const char* key) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(getMetadata)" << std::endl;
        return NodeTypeClass::getMetadata(nodeType, key);
    }
    static const char* call_getMetadata(std::false_type, const NodeTypeObj& nodeType, const char* key) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(getMetadata)" << std::endl;
        return nullptr;
    }
    // ----------------------------------------------------------------------
    // getMetadataCount: default reports zero.
    static size_t call_getMetadataCount(std::true_type, const NodeTypeObj& nodeType) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(getMetadataCount)" << std::endl;
        return NodeTypeClass::getMetadataCount(nodeType);
    }
    static size_t call_getMetadataCount(std::false_type, const NodeTypeObj& nodeType) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(getMetadataCount)" << std::endl;
        return 0;
    }
    // ----------------------------------------------------------------------
    // setMetadata: default silently ignores the request.
    static void call_setMetadata(std::true_type, const NodeTypeObj& nodeType, const char* key, const char* value) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(setMetadata)" << std::endl;
        NodeTypeClass::setMetadata(nodeType, key, value);
    }
    static void call_setMetadata(std::false_type, const NodeTypeObj& nodeType, const char* key, const char* value) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(setMetadata)" << std::endl;
    }
    // ----------------------------------------------------------------------
    // addSubNodeType: default silently ignores the request.
    static void call_addSubNodeType(std::true_type,
                                    const NodeTypeObj& nodeType,
                                    const char* subNodeTypeName,
                                    const NodeTypeObj& subNodeType) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(addSubNodeType)" << std::endl;
        NodeTypeClass::addSubNodeType(nodeType, subNodeTypeName, subNodeType);
    }
    static void call_addSubNodeType(std::false_type,
                                    const NodeTypeObj& nodeType,
                                    const char* subNodeTypeName,
                                    const NodeTypeObj& subNodeType) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(addSubNodeType)" << std::endl;
    }
    // ----------------------------------------------------------------------
    // getSubNodeType: default returns an empty (invalid) NodeTypeObj.
    static NodeTypeObj call_getSubNodeType(std::true_type, const NodeTypeObj& nodeType, const char* subNodeTypeName) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(getSubNodeType)" << std::endl;
        return NodeTypeClass::getSubNodeType(nodeType, subNodeTypeName);
    }
    static NodeTypeObj call_getSubNodeType(std::false_type, const NodeTypeObj& nodeType, const char* subNodeTypeName) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(getSubNodeType)" << std::endl;
        return NodeTypeObj();
    }
    // ----------------------------------------------------------------------
    // createNodeType: default returns an empty (invalid) NodeTypeObj.
    static NodeTypeObj call_createNodeType(std::true_type, const char* nodeTypeName, int version) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(createNodeType)" << std::endl;
        return NodeTypeClass::createNodeType(nodeTypeName, version);
    }
    static NodeTypeObj call_createNodeType(std::false_type, const char* nodeTypeName, int version) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(createNodeType)" << std::endl;
        return NodeTypeObj();
    }
    // ----------------------------------------------------------------------
    // onConnectionTypeResolve: the database-level hook always runs; when the
    // node class has its own hook it runs BEFORE the database hook (note the
    // order differs from call_initialize, which runs the database side first).
    static void call_database_onConnectionTypeResolve(std::true_type, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Database Override(onConnectionTypeResolve)" << std::endl;
        NodeTypeDataClass::onConnectionTypeResolve(node);
    }
    static void call_database_onConnectionTypeResolve(std::false_type, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Database Default(onConnectionTypeResolve)" << std::endl;
    }
    static void call_onConnectionTypeResolve(std::true_type, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(onConnectionTypeResolve)" << std::endl;
        NodeTypeClass::onConnectionTypeResolve(node);
        call_database_onConnectionTypeResolve(is_detected<has_onConnectionTypeResolve, NodeTypeDataClass>(), node);
    }
    static void call_onConnectionTypeResolve(std::false_type, const NodeObj& node) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(onConnectionTypeResolve)" << std::endl;
        call_database_onConnectionTypeResolve(is_detected<has_onConnectionTypeResolve, NodeTypeDataClass>(), node);
    }
    // ----------------------------------------------------------------------
    // inspect: default reports "not handled" via false.
    static bool call_inspect(std::true_type, const NodeTypeObj& nodeType, inspect::IInspector* inspector) noexcept
    {
        OGN_DBG std::cout << "DBG: Override(inspect)" << std::endl;
        return NodeTypeClass::inspect(nodeType, inspector);
    }
    static bool call_inspect(std::false_type, const NodeTypeObj& nodeType, inspect::IInspector* inspector) noexcept
    {
        OGN_DBG std::cout << "DBG: Default(inspect)" << std::endl;
        return false;
    }

public:
    // ----------------------------------------------------------------------
    // These are the ABI implementations, which select the right overload of the
    // call_* helpers above based on whether the node class has custom
    // implementations of the ABI functions or not.
    /**
     * Implementation of omni::graph::core::INodeType::addInput to use as part of a node type definition
     */
    static void addInput(const NodeTypeObj& nodeType,
                         const char* name,
                         const char* typeName,
                         bool required,
                         const void* defaultValuePtr,
                         const size_t* defaultElemCountPtr) noexcept
    {
        return call_addInput(is_detected<has_addInput, NodeTypeClass>(), nodeType, name, typeName, required,
                             defaultValuePtr, defaultElemCountPtr);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addOutput to use as part of a node type definition
     */
    static void addOutput(const NodeTypeObj& nodeType,
                          const char* name,
                          const char* typeName,
                          bool required,
                          const void* defaultValuePtr,
                          const size_t* defaultElemCountPtr) noexcept
    {
        return call_addOutput(is_detected<has_addOutput, NodeTypeClass>(), nodeType, name, typeName, required,
                              defaultValuePtr, defaultElemCountPtr);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addState to use as part of a node type definition
     */
    static void addState(const NodeTypeObj& nodeType,
                         const char* name,
                         const char* typeName,
                         bool required,
                         const void* defaultValuePtr,
                         const size_t* defaultElemCountPtr) noexcept
    {
        return call_addState(is_detected<has_addState, NodeTypeClass>(), nodeType, name, typeName, required,
                             defaultValuePtr, defaultElemCountPtr);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addExtendedInput to use as part of a node type definition
     */
    static void addExtendedInput(const NodeTypeObj& nodeType,
                                 const char* name,
                                 const char* typeName,
                                 bool required,
                                 ExtendedAttributeType extendedType) noexcept
    {
        return call_addExtendedInput(is_detected<has_addExtendedInput, NodeTypeClass>(), nodeType, name, typeName,
                                     required, extendedType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addExtendedOutput to use as part of a node type definition
     */
    static void addExtendedOutput(const NodeTypeObj& nodeType,
                                  const char* name,
                                  const char* typeName,
                                  bool required,
                                  ExtendedAttributeType extendedType) noexcept
    {
        return call_addExtendedOutput(is_detected<has_addExtendedOutput, NodeTypeClass>(), nodeType, name, typeName,
                                      required, extendedType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addExtendedState to use as part of a node type definition
     */
    static void addExtendedState(const NodeTypeObj& nodeType,
                                 const char* name,
                                 const char* typeName,
                                 bool required,
                                 ExtendedAttributeType extendedType) noexcept
    {
        return call_addExtendedState(is_detected<has_addExtendedState, NodeTypeClass>(), nodeType, name, typeName,
                                     required, extendedType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::hasState to use as part of a node type definition
     */
    static bool hasState(const NodeTypeObj& nodeType) noexcept
    {
        return call_hasState(is_detected<has_hasState, NodeTypeClass>(), nodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::setHasState to use as part of a node type definition
     */
    static void setHasState(const NodeTypeObj& nodeType, bool hasState) noexcept
    {
        call_setHasState(is_detected<has_setHasState, NodeTypeClass>(), nodeType, hasState);
    }
    // If the compute ABI function is overridden the user loses all of the nice setup
    // we've done with the data class, however we won't disallow it as there may be reasons
    // for doing it that aren't currently apparent.
    /**
     * Implementation of omni::graph::core::INodeType::compute to use as part of a node type definition
     */
    static bool compute(const GraphContextObj& context, const NodeObj& node) noexcept
    {
        // The four tags select the variant matching how this node type declared
        // its compute (regular vs vectorized, OGN-database vs raw ABI signature).
        return call_compute(regOGN(), regABI(), vecOGN(), vecABI(), context, node);
    }
    /**
     * Implementation of omni::graph::core::INodeType::computeVectorized to use as part of a node type definition
     */
    static size_t computeVectorized(const GraphContextObj& context, const NodeObj& node, size_t count) noexcept
    {
        return call_computeVectorized(regOGN(), regABI(), vecOGN(), vecABI(), context, node, count);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getNodeType to use as part of a node type definition
     */
    static const char* getNodeType()
    {
        return call_getNodeType(is_detected<has_getNodeType, NodeTypeClass>());
    }
    /**
     * Implementation of omni::graph::core::INodeType::getTypeName to use as part of a node type definition
     */
    static const char* getTypeName(const NodeTypeObj& nodeType)
    {
        return call_getTypeName(is_detected<has_getTypeName, NodeTypeClass>(), nodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getScheduleNodeCount to use as part of a node type definition
     */
    static size_t getScheduleNodeCount(const GraphContextObj& context,
                                       const NodeObj& node,
                                       const ScheduleNodeObj* upstreamScheduleNodesBuf,
                                       size_t upstreamBufferSize) noexcept
    {
        return call_getScheduleNodeCount(is_detected<has_getScheduleNodeCount, NodeTypeClass>(), context, node,
                                         upstreamScheduleNodesBuf, upstreamBufferSize);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getScheduleNodes to use as part of a node type definition
     */
    static void getScheduleNodes(const GraphContextObj& context,
                                 const NodeObj& node,
                                 const ScheduleNodeObj* upstreamScheduleNodesBuf,
                                 size_t upstreamBufferSize,
                                 ScheduleNodeObj* scheduleNodesBuf,
                                 size_t bufferSize) noexcept
    {
        call_getScheduleNodes(is_detected<has_getScheduleNodes, NodeTypeClass>(), context, node,
                              upstreamScheduleNodesBuf, upstreamBufferSize, scheduleNodesBuf, bufferSize);
    }
    /**
     * Implementation of omni::graph::core::INodeType::initialize to use as part of a node type definition
     */
    static void initialize(const GraphContextObj& context, const NodeObj& node) noexcept
    {
        call_initialize(is_detected<has_initialize, NodeTypeClass>(), context, node);
    }
    /**
     * Implementation of omni::graph::core::INodeType::initializeType to use as part of a node type definition
     */
    static void initializeType(const NodeTypeObj& nodeType) noexcept
    {
        call_initializeType(is_detected<has_initializeType, NodeTypeClass>(), nodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::registerTasks to use as part of a node type definition
     */
    static void registerTasks() noexcept
    {
        call_registerTasks(is_detected<has_registerTasks, NodeTypeClass>());
    }
    /**
     * Implementation of omni::graph::core::INodeType::release to use as part of a node type definition
     */
    static void release(const NodeObj& node) noexcept
    {
        call_release(is_detected<has_release, NodeTypeClass>(), node);
    }
    /**
     * Implementation of omni::graph::core::INodeType::releaseInstance to use as part of a node type definition
     */
    static void releaseInstance(const NodeObj& node, NameToken instanceID) noexcept
    {
        // Per-instance teardown is delegated to the generated database class.
        NodeTypeDataClass::release(node, instanceID);
    }
    /**
     * Implementation of omni::graph::core::INodeType::destroyDB to use as part of a node type definition
     */
    static void destroyDB(const NodeObj& node, ogn::OmniGraphDatabase* db)
    {
        // The database was allocated as a NodeTypeDataClass; downcast so the
        // correct destructor runs.
        delete static_cast<NodeTypeDataClass*>(db);
    }
    /**
     * Implementation of omni::graph::core::INodeType::notifyTypeResolution to use as part of a node type definition
     */
    static void notifyTypeResolution(AttributeObj const& attrib, ogn::OmniGraphDatabase* db)
    {
        static_cast<NodeTypeDataClass*>(db)->onTypeResolutionChanged(attrib);
    }
    /**
     * Implementation of omni::graph::core::INodeType::notifyDynamicAttributeChanged to use as part of a node type definition
     */
    static void notifyDynamicAttributeChanged(ogn::OmniGraphDatabase* db, AttributeObj const& attr, bool isAttributeCreated)
    {
        static_cast<NodeTypeDataClass*>(db)->onDynamicAttributesChanged(attr, isAttributeCreated);
    }
    /**
     * Implementation of omni::graph::core::INodeType::updateNodeVersion to use as part of a node type definition
     */
    static bool updateNodeVersion(const GraphContextObj& context, const NodeObj& node, int oldVersion, int newVersion) noexcept
    {
        return call_updateNodeVersion(
            is_detected<has_updateNodeVersion, NodeTypeClass>(), context, node, oldVersion, newVersion);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getAllMetadata to use as part of a node type definition
     */
    static size_t getAllMetadata(const NodeTypeObj& nodeType, const char** keyBuf, const char** valueBuf, size_t bufSize) noexcept
    {
        return call_getAllMetadata(is_detected<has_getAllMetadata, NodeTypeClass>(), nodeType, keyBuf, valueBuf, bufSize);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getMetadata to use as part of a node type definition
     */
    static const char* getMetadata(const NodeTypeObj& nodeType, const char* key) noexcept
    {
        return call_getMetadata(is_detected<has_getMetadata, NodeTypeClass>(), nodeType, key);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getMetadataCount to use as part of a node type definition
     */
    static size_t getMetadataCount(const NodeTypeObj& nodeType) noexcept
    {
        return call_getMetadataCount(is_detected<has_getMetadataCount, NodeTypeClass>(), nodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::setMetadata to use as part of a node type definition
     */
    static void setMetadata(const NodeTypeObj& nodeType, const char* key, const char* value) noexcept
    {
        call_setMetadata(is_detected<has_setMetadata, NodeTypeClass>(), nodeType, key, value);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addSubNodeType to use as part of a node type definition
     */
    static void addSubNodeType(const NodeTypeObj& nodeType, const char* subNodeTypeName, const NodeTypeObj& subNodeType)
    {
        call_addSubNodeType(is_detected<has_addSubNodeType, NodeTypeClass>(), nodeType, subNodeTypeName, subNodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getSubNodeType to use as part of a node type definition
     */
    static NodeTypeObj getSubNodeType(const NodeTypeObj& nodeType, const char* subNodeTypeName)
    {
        return call_getSubNodeType(is_detected<has_getSubNodeType, NodeTypeClass>(), nodeType, subNodeTypeName);
    }
    /**
     * Implementation of omni::graph::core::INodeType::createNodeType to use as part of a node type definition
     */
    static NodeTypeObj createNodeType(const char* nodeTypeName, int version)
    {
        return call_createNodeType(is_detected<has_createNodeType, NodeTypeClass>(), nodeTypeName, version);
    }
    /**
     * Implementation of omni::graph::core::INodeType::onConnectionTypeResolve to use as part of a node type definition
     */
    static void onConnectionTypeResolve(const NodeObj& node)
    {
        call_onConnectionTypeResolve(is_detected<has_onConnectionTypeResolve, NodeTypeClass>(), node);
    }
    /**
     * Implementation of omni::graph::core::INodeType::inspect to use as part of a node type definition
     */
    static bool inspect(const NodeTypeObj& nodeType, inspect::IInspector* inspector)
    {
        return call_inspect(is_detected<has_inspect, NodeTypeClass>(), nodeType, inspector);
    }

public:
    /**
     * @brief Constructor with the basic information that is needed to identify a node type
     *
     * @param nodeTypeName Unique name of the node type
     * @param nodeTypeVersion Version of the node type being defined
     * @param nodeTypeExtension Extension owning the node type
     */
    OmniGraphNode_ABI(const char* nodeTypeName, int nodeTypeVersion, const char* nodeTypeExtension)
        : NodeTypeABI(nodeTypeName, nodeTypeVersion, nodeTypeExtension)
    {
        // Stashed in a static so that the static ABI functions above (which
        // receive no 'this') can report the type name.
        sm_nodeTypeName = nodeTypeName;
    }
    /**
     * @brief Populate an INodeType interface with the functions that implement this particular templated node type
     *
     * @param[out] nodeTypeInterface Interface to be populated
     */
    void populateNodeTypeInterface(INodeType& nodeTypeInterface) const override
    {
        // Any functions required in order for the interface to work are left as nullptr when
        // there is no implementation of them overridden by the node so that the defaults can be used.
        nodeTypeInterface.addInput = is_detected<has_addInput, NodeTypeClass>::value ? addInput : nullptr;
        nodeTypeInterface.addOutput = is_detected<has_addOutput, NodeTypeClass>::value ? addOutput : nullptr;
        nodeTypeInterface.addState = is_detected<has_addState, NodeTypeClass>::value ? addState : nullptr;
        nodeTypeInterface.addExtendedInput =
            is_detected<has_addExtendedInput, NodeTypeClass>::value ? addExtendedInput : nullptr;
        nodeTypeInterface.addExtendedOutput =
            is_detected<has_addExtendedOutput, NodeTypeClass>::value ? addExtendedOutput : nullptr;
        nodeTypeInterface.addExtendedState =
            is_detected<has_addExtendedState, NodeTypeClass>::value ? addExtendedState : nullptr;
        nodeTypeInterface.hasState = is_detected<has_hasState, NodeTypeClass>::value ? hasState : nullptr;
        nodeTypeInterface.setHasState = is_detected<has_setHasState, NodeTypeClass>::value ? setHasState : nullptr;
        // These entries always have a usable implementation (the call_* default
        // provides the fallback), so they are installed unconditionally.
        nodeTypeInterface.getNodeType = getNodeType;
        nodeTypeInterface.getTypeName = getTypeName;
        nodeTypeInterface.initialize = initialize;
        nodeTypeInterface.initializeType = initializeType;
        nodeTypeInterface.registerTasks = registerTasks;
        nodeTypeInterface.release = release;
        nodeTypeInterface.updateNodeVersion = updateNodeVersion;
        nodeTypeInterface.getAllMetadata = is_detected<has_getAllMetadata, NodeTypeClass>::value ? getAllMetadata : nullptr;
        nodeTypeInterface.getMetadata = is_detected<has_getMetadata, NodeTypeClass>::value ? getMetadata : nullptr;
        nodeTypeInterface.getMetadataCount =
            is_detected<has_getMetadataCount, NodeTypeClass>::value ? getMetadataCount : nullptr;
        nodeTypeInterface.setMetadata = is_detected<has_setMetadata, NodeTypeClass>::value ? setMetadata : nullptr;
        nodeTypeInterface.addSubNodeType = is_detected<has_addSubNodeType, NodeTypeClass>::value ? addSubNodeType : nullptr;
        nodeTypeInterface.getScheduleNodeCount =
            is_detected<has_getScheduleNodeCount, NodeTypeClass>::value ? getScheduleNodeCount : nullptr;
        nodeTypeInterface.getScheduleNodes =
            is_detected<has_getScheduleNodes, NodeTypeClass>::value ? getScheduleNodes : nullptr;
        nodeTypeInterface.getSubNodeType = is_detected<has_getSubNodeType, NodeTypeClass>::value ? getSubNodeType : nullptr;
        nodeTypeInterface.createNodeType = is_detected<has_createNodeType, NodeTypeClass>::value ? createNodeType : nullptr;
        nodeTypeInterface.onConnectionTypeResolve = onConnectionTypeResolve;
        nodeTypeInterface.inspect = is_detected<has_inspect, NodeTypeClass>::value ? inspect : nullptr;
        // compute/computeVectorized are exposed only when the node type supplied
        // some form of compute (regular or vectorized, OGN or ABI signature).
        nodeTypeInterface.compute = regOGN() || regABI() || vecOGN() || vecABI() ? compute : nullptr;
        nodeTypeInterface.computeVectorized = regOGN() || vecOGN() || vecABI() ? computeVectorized : nullptr;
        nodeTypeInterface.releaseInstance = releaseInstance;
        nodeTypeInterface.destroyDB = destroyDB;
        nodeTypeInterface.notifyTypeResolution = notifyTypeResolution;
        nodeTypeInterface.notifyDynamicAttributeChanged = notifyDynamicAttributeChanged;
        nodeTypeInterface.getCarbABIVersion = []() { return INodeType::getInterfaceDesc().version; };
    }
    static const char* sm_nodeTypeName; //!< Name of node type, to allow passing a static function to ABI
};

// Out-of-line definition of the per-instantiation static type name storage.
template <typename NodeTypeClass, typename NodeTypeDataClass>
const char* OmniGraphNode_ABI<NodeTypeClass, NodeTypeDataClass>::sm_nodeTypeName{ nullptr };

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/ComputeHelpersDynamicInputsDetails.h
namespace Private { template <size_t TUPLE_SIZE, typename ComputeType, typename InputType> inline bool validateInputsAndOutput(gsl::span<InputType const> const inputs, ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu> result) { if (TUPLE_SIZE == 1) { if (result.type().arrayDepth == 0) { // handle single values auto resultValue = result.template get<ComputeType>(); if (!TryComputeHelper<decltype(resultValue)>::testValid(resultValue)) return false; for (auto const& input : inputs) { auto const inputValue = input.template get<ComputeType>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } } else { // handle arrays auto resultValue = result.template get<ComputeType[]>(); if (!TryComputeHelper<decltype(resultValue)>::testValid(resultValue)) return false; for (auto const& input : inputs) { if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isArray(input)) { auto const inputValue = input.template get<ComputeType[]>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } else { auto const inputValue = input.template get<ComputeType>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } } } } else { using ComputeTypeTuple = ComputeType[TUPLE_SIZE]; if (result.type().componentCount != TUPLE_SIZE) return false; if (result.type().arrayDepth == 0) { // handle tuple values auto resultValue = result.get<ComputeTypeTuple>(); if (!TryComputeHelper<decltype(resultValue)>::testValid(resultValue)) return false; for (auto const& input : inputs) { if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isTuple(input)) { auto const inputValue = input.template get<ComputeTypeTuple>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } else { auto const inputValue = input.template get<ComputeType>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } } } else { 
            // handle arrays of tuples
            auto resultValue = result.get<ComputeTypeTuple[]>();
            if (!TryComputeHelper<decltype(resultValue)>::testValid(resultValue))
                return false;
            // Every input must be retrievable in whichever tuple/array combination it reports itself as,
            // otherwise the whole compute is rejected before any data is written.
            for (auto const& input : inputs)
            {
                if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isArray(input))
                {
                    if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isTuple(input))
                    {
                        // input is an array of tuples
                        auto const inputValue = input.template get<ComputeTypeTuple[]>();
                        if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue))
                            return false;
                    }
                    else
                    {
                        // input is an array of single values
                        auto const inputValue = input.template get<ComputeType[]>();
                        if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue))
                            return false;
                    }
                }
                else
                {
                    if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isTuple(input))
                    {
                        // input is a single tuple
                        auto const inputValue = input.template get<ComputeTypeTuple>();
                        if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue))
                            return false;
                    }
                    else
                    {
                        // input is a single value
                        auto const inputValue = input.template get<ComputeType>();
                        if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue))
                            return false;
                    }
                }
            }
        }
    }
    return true;
}

/**
 * Applies an accumulating functor across a set of runtime input attributes of a single
 * (non-tuple) component type, broadcasting non-array inputs against array inputs.
 *
 * The first input initializes the result; each remaining input is folded in through
 * @p functor(inputData, resultData).  When the output is an array, its length is the
 * maximum length of the array inputs and shorter/non-array inputs are broadcast.
 *
 * @param[in] inputs   The runtime input attributes to combine
 * @param[in] result   The runtime output attribute receiving the accumulated value
 * @param[in] functor  Callable taking (const input element, mutable result element)
 * @param[in] count    Number of vectorized instances to process
 * @return true if all inputs and the output validated and the compute was performed,
 *         false otherwise (nothing is written in that case)
 */
template <typename ComputeType, typename InputType, typename Functor>
inline bool tryComputeInputsWithArrayBroadcasting(gsl::span<InputType const> const inputs,
                                                  ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu> result,
                                                  Functor functor,
                                                  size_t count)
{
    if (!validateInputsAndOutput<1, ComputeType, InputType>(inputs, result))
        return false;
    if (result.type().arrayDepth == 0)
    {
        // the output is not an array
        auto resultValue = result.get<ComputeType>();
        using TResultValue = typename remove_const_ref<decltype(*resultValue)>::type;
        auto inputIt = inputs.begin();
        // initialize the result
        {
            auto const inputValue = inputIt->template get<ComputeType>();
            using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
            for (size_t instance = 0; instance < count; ++instance)
            {
                auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance);
                auto& resultData = ArrayHelper<1, TResultValue>::accessArg(*resultValue, 0, 0, instance);
                resultData = inputData;
            }
        }
        // accumulate the result by iterating over the remaining inputs
        ++inputIt;
        for (; inputIt != inputs.end(); ++inputIt)
        {
            auto const inputValue = inputIt->template get<ComputeType>();
            using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
            for (size_t instance = 0; instance < count; ++instance)
            {
                functor(ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance),
                        ArrayHelper<1, TResultValue>::accessArg(*resultValue, 0, 0, instance));
            }
        }
        return true;
    }
    else
    {
        // cache for the array data, to avoid having to get it from fabric multiple times
        // (once for getting the size, a second time for computing the result)
        std::vector<ArrayDataReadOnly<ComputeType[], ogn::kCpu>> arrayDataCache;
        arrayDataCache.reserve(inputs.size());
        // the output is an array of single values
        // result.get<ComputeType[]>() returns a temporary value, so it has to be cached.
        auto arrayObj = result.template get<ComputeType[]>();
        auto& resultArray = *arrayObj;
        using TResultValue = typename remove_const_ref<decltype(resultArray)>::type;
        for (size_t instance = 0; instance < count; ++instance)
        {
            // find the output length
            // NOTE(review): this pass also fills arrayDataCache, which the copy/accumulate
            // passes below consume in the same order -- keep the two traversals in sync.
            size_t len = 1;
            for (const auto& input : inputs)
            {
                using TInput = typename remove_const_ref<decltype(input)>::type;
                if (RuntimeAttribHelper<1, TInput>::isArray(input))
                {
                    arrayDataCache.emplace_back(input.template get<ComputeType[]>());
                    auto const& array = *arrayDataCache.back();
                    auto s = ArrayHelper<1, decltype(array)>::getArgsLengthAndAdjustHandle(array, instance);
                    len = std::max(len, s);
                }
            }
            // move the output handle to the next instance before resizing (first instance needs no move)
            if (instance)
                resultArray.adjustHandle(1);
            resultArray.resize(len);
            auto arrayDataCacheIt = arrayDataCache.cbegin();
            auto inputIt = inputs.begin();
            // initialize the result from the first input (broadcast if it is not an array)
            using TInput = typename remove_const_ref<decltype(*inputIt)>::type;
            if (RuntimeAttribHelper<1, TInput>::isArray(*inputIt))
            {
                const auto& inputValue = *arrayDataCacheIt++;
                using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                for (size_t idx = 0; idx < len; ++idx)
                {
                    auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, idx, 0, instance);
                    auto& resultData = ArrayHelper<1, TResultValue>::accessArg(resultArray, idx, 0, instance);
                    resultData = inputData;
                }
            }
            else
            {
                auto const inputValue = inputIt->template get<ComputeType>();
                using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance);
                for (size_t idx = 0; idx < len; ++idx)
                {
                    auto& resultData = ArrayHelper<1, TResultValue>::accessArg(resultArray, idx, 0, instance);
                    resultData = inputData;
                }
            }
            // accumulate the remaining inputs into the result
            ++inputIt;
            for (; inputIt != inputs.end(); ++inputIt)
            {
                using TInput = typename remove_const_ref<decltype(*inputIt)>::type;
                if (RuntimeAttribHelper<1, TInput>::isArray(*inputIt))
                {
                    auto const& inputValue = *arrayDataCacheIt++;
                    using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                    for (size_t idx = 0; idx < len; ++idx)
                    {
                        auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, idx, 0, instance);
                        auto& resultData = ArrayHelper<1, TResultValue>::accessArg(resultArray, idx, 0, instance);
                        functor(inputData, resultData);
                    }
                }
                else
                {
                    auto const inputValue = inputIt->template get<ComputeType>();
                    using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                    auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance);
                    for (size_t idx = 0; idx < len; ++idx)
                    {
                        auto& resultData = ArrayHelper<1, TResultValue>::accessArg(resultArray, idx, 0, instance);
                        functor(inputData, resultData);
                    }
                }
            }
        }
        return true;
    }
    return false;
}

/**
 * @return true if the attribute referenced by the iterator is a tuple of the expected size N
 *         (always false when N == 1, i.e. single values are never treated as tuples)
 */
template<size_t N, typename InputIterator>
inline bool isTuple(InputIterator it)
{
    return N != 1 && it->type().componentCount == N;
}

/**
 * @return true if the attribute referenced by the iterator is an array type
 */
template<typename InputIterator>
inline bool isArray(InputIterator it)
{
    return it->type().arrayDepth != 0;
}

/**
 * Applies an accumulating functor across a set of runtime input attributes, broadcasting
 * both across tuple components (single value -> N-tuple) and across array elements
 * (single value/tuple -> array).
 *
 * The first input initializes the result; each remaining input is folded in through
 * @p functor(inputData, resultData).  When the output is an array, its length is the
 * maximum length of the array inputs.
 *
 * @tparam N Tuple size of the output
 * @param[in] inputs   The runtime input attributes to combine
 * @param[in] result   The runtime output attribute receiving the accumulated value
 * @param[in] functor  Callable taking (const input element, mutable result element)
 * @param[in] count    Number of vectorized instances to process
 * @return true if all inputs and the output validated and the compute was performed,
 *         false otherwise (nothing is written in that case)
 */
template <size_t N, typename ComputeType, typename InputType, typename Functor>
inline bool tryComputeInputsWithTupleBroadcasting(gsl::span<InputType const> const inputs,
                                                  ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu> result,
                                                  Functor functor,
                                                  size_t count)
{
    if (!validateInputsAndOutput<N, ComputeType, InputType>(inputs, result))
        return false;
    using ComputeTypeTuple = ComputeType[N];
    if (result.type().arrayDepth == 0)
    {
        // the output is not an array
        auto resultValue = result.get<ComputeTypeTuple>();
        using TResultValue = typename remove_const_ref<decltype(*resultValue)>::type;
        auto inputIt = inputs.begin();
        // initialize the result
        using TInput = typename remove_const_ref<decltype(*inputIt)>::type;
        if (RuntimeAttribHelper<N, TInput>::isTuple(*inputIt))
        {
            auto const inputValue = inputIt->template get<ComputeTypeTuple>();
            using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
            for (size_t instance = 0; instance < count; ++instance)
            {
                for (uint8_t t = 0; t < N; ++t)
                {
                    auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, 0, t, instance);
                    auto& resultData = ArrayHelper<N, TResultValue>::accessArg(*resultValue, 0, t, instance);
                    resultData = inputData;
                }
            }
        }
        else
        {
            // single value broadcast into every tuple component
            auto const inputValue = inputIt->template get<ComputeType>();
            using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
            for (size_t instance = 0; instance < count; ++instance)
            {
                auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance);
                for (uint8_t t = 0; t < N; ++t)
                {
                    auto& resultData = ArrayHelper<N, TResultValue>::accessArg(*resultValue, 0, t, instance);
                    resultData = inputData;
                }
            }
        }
        // accumulate the result by iterating over the remaining inputs
        ++inputIt;
        for (; inputIt != inputs.end(); ++inputIt)
        {
            using TInput = typename remove_const_ref<decltype(*inputIt)>::type;
            if (RuntimeAttribHelper<N, TInput>::isTuple(*inputIt))
            {
                auto const inputValue = inputIt->template get<ComputeTypeTuple>();
                using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                for (size_t instance = 0; instance < count; ++instance)
                {
                    for (uint8_t t = 0; t < N; ++t)
                    {
                        auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, 0, t, instance);
                        auto& resultData = ArrayHelper<N, TResultValue>::accessArg(*resultValue, 0, t, instance);
                        functor(inputData, resultData);
                    }
                }
            }
            else
            {
                auto const& inputValue = inputIt->template get<ComputeType>();
                using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                for (size_t instance = 0; instance < count; ++instance)
                {
                    auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance);
                    for (uint8_t t = 0; t < N; ++t)
                    {
                        auto& resultData = ArrayHelper<N, TResultValue>::accessArg(*resultValue, 0, t, instance);
                        functor(inputData, resultData);
                    }
                }
            }
        }
        return true;
    }
    else // result.type().arrayDepth > 0
    {
        // cache for the array data, to avoid having to get it from fabric multiple times
        // (once for getting the size, a second time for computing the result)
        // Note: we need different caches for tuple and non tuple arrays
        std::vector<ArrayDataReadOnly<ComputeType[], ogn::kCpu>> arrayDataCache;
        arrayDataCache.reserve(inputs.size());
        std::vector<TupleArrayDataReadOnly<ComputeTypeTuple[], ogn::kCpu>> tupleArrayDataCache;
        tupleArrayDataCache.reserve(inputs.size());
        // result.get<ComputeTypeTuple[]>() returns a temporary value, so it has to be cached.
        auto arrayObj = result.template get<ComputeTypeTuple[]>();
        auto& resultArray = *arrayObj;
        using TResultValue = typename remove_const_ref<decltype(resultArray)>::type;
        for (size_t instance = 0; instance < count; ++instance)
        {
            // find the output length, filling the two caches in input order as we go
            // NOTE(review): the copy/accumulate passes below must consume the caches in the same order.
            size_t len = 1;
            for (auto const& input : inputs)
            {
                using TInput = typename remove_const_ref<decltype(input)>::type;
                if (RuntimeAttribHelper<N, TInput>::isArray(input))
                {
                    tupleArrayDataCache.emplace_back(input.template get<ComputeTypeTuple[]>());
                    auto const& array = *tupleArrayDataCache.back();
                    auto s = ArrayHelper<N, decltype(array)>::getArgsLengthAndAdjustHandle(array, instance);
                    len = std::max(len, s);
                }
                else if (RuntimeAttribHelper<1, TInput>::isArray(input))
                {
                    arrayDataCache.emplace_back(input.template get<ComputeType[]>());
                    auto const& array = *arrayDataCache.back();
                    auto s = ArrayHelper<1, decltype(array)>::getArgsLengthAndAdjustHandle(array, instance);
                    len = std::max(len, s);
                }
            }
            // move the output handle to the next instance before resizing (first instance needs no move)
            if (instance)
                resultArray.adjustHandle(1);
            resultArray.resize(len);
            auto arrayDataCacheIt = arrayDataCache.cbegin();
            auto tupleArrayDataCacheIt = tupleArrayDataCache.cbegin();
            auto inputIt = inputs.begin();
            // initialize the result from the first input, broadcasting over tuple and/or array as needed
            if (!isArray(inputIt))
            {
                if (isTuple<N>(inputIt))
                {
                    auto const inputValue = inputIt->template get<ComputeTypeTuple>();
                    using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                    for (size_t idx = 0; idx < len; ++idx)
                    {
                        for (uint8_t t = 0; t < N; ++t)
                        {
                            auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, 0, t, instance);
                            auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance);
                            resultData = inputData;
                        }
                    }
                }
                else
                {
                    auto const inputValue = inputIt->template get<ComputeType>();
                    using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                    auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance);
                    for (size_t idx = 0; idx < len; ++idx)
                    {
                        for (uint8_t t = 0; t < N; ++t)
                        {
                            auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance);
                            resultData = inputData;
                        }
                    }
                }
            }
            else
            {
                if (isTuple<N>(inputIt))
                {
                    auto const& inputValue = *tupleArrayDataCacheIt++;
                    using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                    for (size_t idx = 0; idx < len; ++idx)
                    {
                        for (uint8_t t = 0; t < N; ++t)
                        {
                            auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, idx, t, instance);
                            auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance);
                            resultData = inputData;
                        }
                    }
                }
                else
                {
                    const auto& inputValue = *arrayDataCacheIt++;
                    using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                    for (size_t idx = 0; idx < len; ++idx)
                    {
                        auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, idx, 0, instance);
                        for (uint8_t t = 0; t < N; ++t)
                        {
                            auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance);
                            resultData = inputData;
                        }
                    }
                }
            }
            // accumulate the remaining inputs into the result
            ++inputIt;
            for (; inputIt != inputs.end(); ++inputIt)
            {
                if (!isArray(inputIt))
                {
                    if (isTuple<N>(inputIt))
                    {
                        auto const inputValue = inputIt->template get<ComputeTypeTuple>();
                        using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                        for (size_t idx = 0; idx < len; ++idx)
                        {
                            for (uint8_t t = 0; t < N; ++t)
                            {
                                auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, 0, t, instance);
                                auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance);
                                functor(inputData, resultData);
                            }
                        }
                    }
                    else
                    {
                        auto const inputValue = inputIt->template get<ComputeType>();
                        using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type;
                        auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance);
                        for (size_t idx = 0; idx < len; ++idx)
                        {
                            for (uint8_t t = 0; t < N; ++t)
                            {
                                auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance);
                                functor(inputData, resultData);
                            }
                        }
                    }
                }
                else
                {
                    if (isTuple<N>(inputIt))
                    {
                        auto const& inputValueArray = *tupleArrayDataCacheIt++;
                        using TInputValue = typename remove_const_ref<decltype(*inputValueArray)>::type;
                        for (size_t idx = 0; idx < len; ++idx)
                        {
                            for (uint8_t t = 0; t < N; ++t)
                            {
                                auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValueArray, idx, t, instance);
                                auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance);
                                functor(inputData, resultData);
                            }
                        }
                    }
                    else
                    {
                        auto const& inputValueArray = *arrayDataCacheIt++;
                        using TInputValue = typename remove_const_ref<decltype(*inputValueArray)>::type;
                        for (size_t idx = 0; idx < len; ++idx)
                        {
                            auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValueArray, idx, 0, instance);
                            for (uint8_t t = 0; t < N; ++t)
                            {
                                auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance);
                                functor(inputData, resultData);
                            }
                        }
                    }
                }
            }
        }
        return true;
    }
    return false;
}
}//namespace private
omniverse-code/kit/include/omni/graph/core/ogn/Types.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/Handle.h>
#include <omni/graph/core/TemplateUtils.h>

#include <omni/fabric/IPath.h>
#include <omni/fabric/IToken.h>

namespace omni
{
namespace graph
{
namespace core
{
namespace ogn
{

// ======================================================================
/**
 * Enumeration of the memory locations an attribute's data might have. The data location will determine which of
 * the data accessors will be used, and what type of data they will return. (GPU data will always be returned
 * as raw pointers since the CPU cannot access that memory.)
 *
 * This type will be used as a template parameter to adjust the behaviour of OGN wrapper classes.
 */
enum eMemoryType
{
    kCpu,  //!< The attribute's data is always on the CPU
    kCuda, //!< The attribute's data is always on the GPU
    kAny   //!< The attribute's data location can be either, decided at runtime
};

// ======================================================================
/**
 * Enumeration of an attribute's access type. In order to provide information to the scheduler about how
 * fabric data will be accessed, one of these access types is associated with all generated attributes.
 *
 * This type will be used as a template parameter to adjust the behaviour of OGN wrapper classes.
 */
enum eAttributeType
{
    kOgnInput,  //!< The attribute is a node input
    kOgnOutput, //!< The attribute is a node output
    kOgnState   //!< The attribute is node state, persisting between evaluations
};

// ======================================================================
/**
 * Severity level for logging messages.
 */
enum class Severity : uint8_t
{
    eInfo = 0, //!< Informational message
    eWarning,  //!< Warning message
    eError,    //!< Error message
    eCount     //!< Number of severity levels; not a valid severity itself
};

// ======================================================================
/**
 * When templating methods by data type the template types must be unique. The implementation of
 * NameToken in iComputeGraph defines it as a simple uint64_t, which is also a raw data type used by OGN.
 * To allow different templates to be instantiated for these two data types, identical in implementation but
 * vastly different in semantics, this wrapper can be used instead as a drop-in replacement for NameToken.
 *
 * Thus these two template instantiations will be determined to be unique by the compiler, but the data
 * passed in will be identical, to be interpreted in the correct way by the function:
 *
 *     template <typename DataType> void myData(DataType& value);
 *     myData(OgnToken&);  // Receives an OgnToken, which is directly convertible to NameToken
 *     myData(uint64_t&);  // Receives a raw uint64_t
 *
 * This type will be used as a template parameter to adjust the behaviour of OGN wrapper classes.
 */
using Token = omni::fabric::Token;
static_assert(::std::is_convertible<Token, NameToken>::value, "ogn::Token must be equivalent to NameToken");

// Same drop-in-replacement pattern as Token, but for path attributes
using Path = omni::fabric::Path;
static_assert(::std::is_convertible<Path, TargetPath>::value, "ogn::Path must be equivalent to TargetPath");

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni

// Declare this outside of the namespaces, relying on the uniqueness of the name to provide easy access
using OgnToken = omni::graph::core::ogn::Token;
using OgnPath = omni::graph::core::ogn::Path;

// Generated code is kept smaller by assuming this namespace is active. The alternative would be to explicitly
// reference all types and values used by the generated code, which would end up amounting to the same thing.
// Core types can be accessed directly (e.g. getDataW()) and OGN types use the shortened "ogn::string"
using namespace omni::graph::core;
omniverse-code/kit/include/omni/graph/core/ogn/array.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// =================================================================================================================
// This file contains simple interface classes which wrap data in the OGN database for easier use.
//
//     ogn::const_array   Read-only array of fabric data
//     ogn::array         Array of fabric data, with writing and resizing capabilities
//
// WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code.
//          If you call them directly you may have to modify your code when they change.
// =================================================================================================================

#include <gsl/span>

#include <cstring>
#include <stdexcept>
#include <type_traits>

#include <omni/graph/core/Handle.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/CudaUtils.h>
#include <omni/graph/core/iAttributeData.h>

using omni::fabric::PtrToPtrKind;

namespace omni
{
namespace graph
{
namespace core
{
namespace ogn
{

// =================================================================================================================
/**
 * std::span-like wrapper class for array attribute data in the Ogn Database.
 *
 * In attribute terms an array is a variable sized collection of data of a single type, not to be confused with
 * the C++ notion of an array, which is a fixed sized collection of data of a single type.
 *
 * This wrapper operates by using the Fabric interface to interact with array data it has stored.
 * The base array class provides common operations common to both const and non-const data.
 *
 * The underlying span is fetched lazily: mutating operations only mark the wrapper dirty (see setDirty()),
 * and span() refreshes from Fabric on the next read.
 *
 * @tparam BaseDataType Type of data contained within the array
 * @tparam HandleType   Attribute handle used to access the underlying Fabric copy of the data
 */
template <typename BaseDataType, typename HandleType>
class base_array
{
    //from regular attributes
    template <typename, eAttributeType, eMemoryType, PtrToPtrKind>
    friend struct ArrayAttribute;
    //from runtime attributes
    template <typename, bool, eMemoryType, PtrToPtrKind >
    friend struct ArrayData;

public:
    /** const version of the BaseDataType */
    using ConstBaseDataType = const typename std::remove_const<BaseDataType>::type;

    /** Type definition of this class */
    using this_t = base_array<BaseDataType, HandleType>;

    // Pass through the span iterator so that this class can iterate over it transparently
    /** Iterator over the array contents */
    using iterator = typename gsl::span<BaseDataType>::iterator;
    /** Reverse iterator over the array contents */
    using reverse_iterator = typename gsl::span<BaseDataType>::reverse_iterator;

    /**
     * Constructor
     */
    base_array() = default;

    /**
     * Confirm that the data values are safe for accessing
     *
     * @return true if the context and attribute handle values are both valid
     */
    bool isValid() const
    {
        return context() && (AttrKey)handle() != handle().invalidValue();
    }

    /**
     * @return The number of elements in the currently managed array
     */
    size_t size() const { return span().size();}

    /**
     * @return Is the currently managed array empty?
     */
    bool empty() const { return span().empty(); }

    /**
     * @return Pointer to the raw data in the array (first element)
     */
    ConstBaseDataType* data() const { return span().data();}

    /**
     * @return Iterator pointing to the beginning of the array
     */
    const iterator begin() const { return span().begin(); }

    /**
     * @return Iterator pointing past the end of the array
     */
    const iterator end() const { return span().end(); }

    /**
     * @return Iterator pointing to the end of the array
     */
    const reverse_iterator rbegin() const { return span().rbegin(); }

    /**
     * @return Iterator pointing before the beginning of the array
     */
    const reverse_iterator rend() const { return span().rend(); }

    /**
     * Access a specific element of the array. No bounds checking is performed.
     *
     * @param[in] index Element index into the array
     * @return Reference to the index'th element of the array
     */
    ConstBaseDataType& operator[](size_t index) const { return span()[index]; }

    /**
     * Access a specific element of the array with bounds checking.
     *
     * @param[in] index Element index into the array
     * @return Reference to the index'th element of the array
     * @exception std::out_of_range if there is no data for the given index
     */
    ConstBaseDataType& at(size_t index) const
    {
        auto const& spanObj = span();
        if (!spanObj.data() || (spanObj.size() <= index))
        {
            std::string rangeMessage("Attempt to access out of range index ");
            rangeMessage += std::to_string(index);
            throw std::out_of_range(rangeMessage);
        }
        return spanObj[index];
    }

    /**
     * Access the underlying span that allows to access the array data
     *
     * Lazily refreshes the cached span from Fabric when the wrapper is dirty.
     * The const_cast is the standard trick to let a logically-const read repair the
     * mutable cache (m_dirty/m_span) from a const accessor.
     *
     * @return A reference to the underlying span
     */
    gsl::span<BaseDataType> const& span() const
    {
        if (m_dirty)
            const_cast<this_t*>(this)->reset();
        m_dirty = false;
        return m_span;
    }

    /**
     * @brief Access the context to which this array belongs
     *
     * @returns Pointer to the context to which this array belongs
     */
    GraphContextObj const* context() const { return m_context;}

    /**
     * @brief Access the attribute handle used to access the array data in Fabric
     *
     * @returns Pointer to the attribute handle used to access the array data in Fabric
     */
    HandleType const& handle() const { return m_handle;}

    /**
     * Flag the span data as not being up to date, forcing a refresh from Fabric on next access
     */
    void setDirty() const { m_dirty = true; }

    /**
     * In a vectorized context, move forward the current handle
     *
     * @param[in] idx The amount of instance(s) to offset the current handle
     */
    void adjustHandle(size_t idx)
    {
        const IAttributeData& iData = *(context()->iAttributeData);
        moveHdl(iData, idx, m_handle);
        setDirty();
    }

protected:
    /**
     * Reset the internal span to point to new data.
     * Re-reads element count and data pointer from Fabric; invalid wrappers get an empty span.
     */
    void reset()
    {
        if (isValid())
        {
            const IAttributeData& iData = *(m_context->iAttributeData);
            size_t count = 0;
            ConstAttributeDataHandle chdl = m_handle;
            iData.getElementCount(&count, *m_context, &chdl, 1);
            BaseDataType** ptrToData = getData(iData, m_handle);
            this->m_span = gsl::span<BaseDataType>(*ptrToData, count);
        }
        else
        {
            this->m_span = gsl::span<BaseDataType>();
        }
    }

    /**
     * @brief Get a pointer to the array data from a specific const data handle
     *
     * @param iData Interface class containing the accessor functions
     * @param hdl Handle to the attribute data whose value is being retrieved
     * @return BaseDataType** Pointer to the array of retrieved data
     */
    BaseDataType** getData(const IAttributeData& iData, ConstAttributeDataHandle const& hdl)
    {
        BaseDataType** ptrToData = nullptr;
        iData.getDataR((const void**)&ptrToData, *m_context, &hdl, 1);
        return ptrToData;
    }

    /**
     * @brief Get a pointer to the array data from a specific mutable data handle
     *
     * @param iData Interface class containing the accessor functions
     * @param hdl Handle to the attribute data whose value is being retrieved
     * @return BaseDataType** Pointer to the array of retrieved data
     */
    BaseDataType** getData(const IAttributeData& iData, AttributeDataHandle const& hdl)
    {
        BaseDataType** ptrToData = nullptr;
        iData.getDataW((void**)&ptrToData, *m_context, &hdl, 1);
        return ptrToData;
    }

    /**
     * Set the context. This is done to allow the same wrapper class to be used for
     * multiple evaluations in different contexts.
     *
     * @param[in] context The graph context to which the array belongs
     */
    void setContext(const GraphContextObj& context)
    {
        m_context = &context;
        setDirty();
    }

    /**
     * Set the attribute handle. This is done to allow the same wrapper class to be used for
     * multiple evaluations in different contexts.
     *
     * @param[in] handle Handle to the attribute to which the array belongs
     */
    void setHandle(HandleType handle)
    {
        m_handle = handle;
        setDirty();
    }

private:
    /**
     * helper that allows to make the proper call depending on the type of the handle
     * (read-only vs writable overloads of moveToAnotherInstance)
     */
    void moveHdl(const IAttributeData& iData, size_t index, ConstAttributeDataHandle& hdl) const
    {
        hdl = iData.moveToAnotherInstanceR(*m_context, hdl, (int)index);
    }
    void moveHdl(const IAttributeData& iData, size_t index, AttributeDataHandle& hdl) const
    {
        hdl = iData.moveToAnotherInstanceW(*m_context, hdl, (int)index);
    }

    const GraphContextObj* m_context{ nullptr };           //!< The graph context to which the array belongs
    HandleType m_handle{ HandleType::invalidValue() };     //!< Handle to the attribute data
    bool mutable m_dirty { true };                         //!< whether the span is uptodate or not
    gsl::span<BaseDataType> m_span;                        //!< Iterable managed array data
};

// =================================================================================================================
/**
 * std::vector-like wrapper class for constant array attribute data in the Ogn Database.
 * It operates by using the Fabric interface to interact with array data it has stored.
 * This const version of the array wrapper should be used for input attributes, whose data cannot be changed.
 * (The naming is "array" for consistency with how attribute types are named, even though it doesn't
 * behave like a std::array, whose content size is determined at compile time.)
 *
 * @tparam BaseDataType Type of data contained within the array
 */
template <typename BaseDataType>
class const_array : public base_array<const BaseDataType, ConstAttributeDataHandle>
{
public:
    /**
     * Constructor
     */
    const_array() = default;
};

// =================================================================================================================
/**
 * std::vector-like wrapper class for array attribute data in the Ogn Database.
 * It operates by using the Fabric interface to interact with array data it has stored.
 * This non-const version of the array wrapper should be used for output attributes, whose data will be changed.
 * (The naming is "array" for consistency with how attribute types are named, even though it doesn't
 * behave like a std::array, whose content size is determined at compile time.)
 *
 * @tparam BaseDataType Type of data contained within the array
 */
template <typename BaseDataType>
class array : public base_array<BaseDataType, AttributeDataHandle>
{
public:
    /** The type of the parent class */
    using parent_t = base_array<BaseDataType, AttributeDataHandle>;

    // Make non-templated functions available to pass 1 of template resolution
    // http://www.gotw.ca/gotw/087.htm
    using parent_t::data;
    using parent_t::size;

    // Pass through the span iterator so that this class can iterate over it transparently
    /** Iterator over the array contents */
    using iterator = typename gsl::span<BaseDataType>::iterator;
    /** Reverse iterator over the array contents */
    using reverse_iterator = typename gsl::span<BaseDataType>::reverse_iterator;

    /**
     * Constructor
     */
    array() = default;

    /**
     * Assignment operator: performs a shallow copy
     *
     * NOTE(review): all three operator= overloads funnel through shallowCopy via a
     * reinterpret_cast between array/const_array instantiations; this relies on those
     * templates being layout-identical -- verify if the class layout ever changes.
     *
     * @param[in] rhs The array being copied in
     * @return Reference to this
     */
    array& operator=(array<BaseDataType> const& rhs)
    {
        return shallowCopy(reinterpret_cast<const_array<BaseDataType> const&>(rhs));
    }

    /**
     * Assignment operator: performs a shallow copy of a const array of const data
     *
     * @param[in] rhs The array being copied in
     * @return Reference to this
     */
    array& operator=(const_array<const BaseDataType> const& rhs)
    {
        return shallowCopy(reinterpret_cast<const_array<BaseDataType> const&>(rhs));
    }

    /**
     * Assignment operator: performs a shallow copy of a const array of non-const data
     *
     * @param[in] rhs The array being copied in
     * @return Reference to this
     */
    array& operator=(const_array<BaseDataType> const& rhs)
    {
        return shallowCopy(rhs);
    }

    /**
     * Performs a shallow copy the provided object over this object
     *
     * This will create a reference for this object that points to the provided object
     *
     * @param[in] from The array being copied in
     * @return Reference to this
     */
    array& shallowCopy(const_array<BaseDataType> const& from)
    {
        this->context()->iAttributeData->copyData(this->handle(), *this->context(), from.handle());
        this->setDirty();
        return *this;
    }

    /**
     * Overwrite this object data with data from the provided object
     *
     * This will create a duplicate of all array members and reset the references
     *
     * @param[in] rhs The array being copied in
     * @return Reference to this
     */
    array& deepCopy(parent_t const& rhs)
    {
        // Resize this array first so that it has the proper space to receive the new data
        resize(rhs.size());
        if (rhs.size() == 0)
        {
            // No work to do when the new array is empty
            return *this;
        }
        // Get the raw arrays and walk them directly for the copy to minimize friction.
        // The conditional selects element-wise assignment when BaseDataType is assignable,
        // falling back to a raw memcpy otherwise.
        rawCopyFrom(rhs.data(),
                    std::conditional_t<std::is_assignable<BaseDataType, std::add_const_t<BaseDataType>>::value,
                                       std::true_type, std::false_type>());
        return *this;
    }

    /**
     * Set the size of the array data to a new value.
     *
     * This may or may not relocate memory. If anything is holding a raw pointer from contents()
     * that pointer should be refreshed by calling contents() again when this method returns.
     *
     * @param[in] newCount New element count of the array.
     */
    void resize(size_t newCount)
    {
        CUDA_SAFE_ASSERT(this->isValid());
        const IAttributeData& iData = *(this->context()->iAttributeData);
        iData.setElementCount(*this->context(), this->handle(), newCount);
        this->setDirty();
    }

    /**
     * @return Non-const pointer to the raw data in the array (first element)
     */
    BaseDataType* data()
    {
        return const_cast<BaseDataType*>(this->parent_t::data());
    }

    /**
     * Access a specific element of the array. No bounds checking is performed.
     *
     * @param[in] index Element index into the array
     * @return Reference to the index'th element of the array
     */
    BaseDataType& operator[](size_t index)
    {
        return const_cast<BaseDataType&>(this->parent_t::operator[](index));
    }

    /**
     * Access a specific element of the array with bounds checking.
     *
     * @param[in] index Element index into the array
     * @return Reference to the index'th element of the array
     * @exception std::out_of_range if there is no data for the given index
     */
    BaseDataType& at(size_t index)
    {
        return const_cast<BaseDataType&>(this->parent_t::at(index));
    }

private:
    /**
     * Safe copy; one version for when the array members are assignable, the other as a fallback to do raw memcpy
     *
     * NOTE(review): the memcpy fallback presumably assumes BaseDataType is trivially copyable
     * in that case -- confirm against the types Fabric stores.
     *
     * @param[in] srcArray   The location of the raw data to be copied in (already vetted for size compatibility)
     * @param[in] assignable Overload selected when the base data type can or cannot be assigned
     */
    void rawCopyFrom(const BaseDataType* srcArray, std::true_type assignable)
    {
        auto dstArray = data();
        for (size_t i = 0; i < size(); ++i)
        {
            dstArray[i] = srcArray[i];
        }
    }
    void rawCopyFrom(const BaseDataType* srcArray, std::false_type assignable)
    {
        memcpy(data(), srcArray, sizeof(BaseDataType) * size());
    }
};

/** Default trait indicating if the class is one of our array types
 *  @tparam T Class type to check for being an array
 */
template<class T> struct is_array : std::false_type {};

/** Trait indicating that specific templated types are array types
 *  @tparam T Class type to check for being an array
 *  @tparam HandleType Attribute data handle type for the array
 */
template<class T, typename HandleType> struct is_array<base_array<T, HandleType>> : std::true_type {};

/** Trait indicating that mutable templated types are array types
 *  @tparam T Class type to check for being an array
 */
template<class T> struct is_array<array<T>> : std::true_type {};

/** Trait indicating that constant templated types are array types
 *  @tparam T Class type to check for being an array
 */
template<class T> struct is_array<const_array<T>> : std::true_type {};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/RuntimeAttribute.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// =================================================================================================================
// This file contains simple interface classes which wrap data in the OGN database for easier use.
//
// RuntimeAttribute    Wrapper providing access to attributes whose type is only known at runtime
//                     This includes attributes inside bundles and attributes with extended types
//
// WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code.
//          If you call them directly you may have to modify your code when they change.
// =================================================================================================================

#ifdef THIS_IS_INCLUDED_IN_THE_DOCUMENTATION

// The information bracketed here with begin/end describes the interface that is recommended for use with attributes
// whose data type is decided at runtime. The documentation uses these markers to perform a literal include of this
// code into the docs so that it can be the single source of truth.
// Note that the interface described here is not the complete set of C++ functions available, merely the ones that make
// sense for the user to access when dealing with runtime attributes.
//
//
// begin-extended-attribute-interface-description
// A runtime attribute can either be an attribute defined as one of the extended types ("union" or "any") or an
// attribute that is a member of a bundle. As you might guess, the defining feature of such attributes is the fact
// that the type of data they store is not known until runtime. And further, that type can change from one evaluation
// to the next. For that reason the runtime attribute accessors have different methods of acquiring their data than
// regular attributes.
//
// The two ways of acquiring the accessor for a runtime attribute are directly, for extended types
const auto& anyType = db.inputs.anyTypeAttribute();
// and as a member, for bundled attributes
const auto memberAttribute = db.inputs.myBundle().attributeByName(db.tokens.memberName);
// Runtime attributes can be copied, which copies both the type and the data of the attribute (unlike regular
// attributes, which would just copy the data)
auto newAttribute = anyType;
// There is also another method that will just copy the data, though it is up to you to ensure that the data
// types of the two attributes are the same.
newAttribute.copyData(anyType);
// As with regular attributes you can check if their data is valid...
const bool anyIsValid = anyType.isValid();
// ...get the number of elements in the array, if they are an array type...
const size_t howManyAnysDoIHave = anyType.size();
// ...and drop down to the ABI to get a handle for direct ABI calls (although for runtime attributes the handle
// is AttributeDataHandle/ConstAttributeDataHandle, not AttributeHandle as it is for regular attributes since
// the ABI has different capabilities for them)
const auto& abiHandle = anyType.abi_handle();
// They also have a method to determine if the actual type of the attribute has been resolved. Until the type is
// resolved the attribute's data cannot be accessed
const bool isTheTypeKnown = anyType.resolved();
// For bundled attributes the name is not known until runtime either so a method to access that is provided,
// returning the hardcoded name for extended attributes
const auto& memberName = memberAttribute.name();
// And the resolved type information is also available. Checking for an unknown type is another way to determine
// if the attribute type has not yet been resolved.
const auto& anyAttributesType = anyType.type();
// Finally there are the data access methods. The primary one is a templated function through which you can access
// the attribute's data in its raw form. The value returned isn't the data itself, it is a thin wrapper around the
// data that has a few functions of its own.
//
// This is the function to call for the majority of attributes, whose memory space is fixed either to CPU or GPU.
// It returns an object that can be used to access information about the attribute's value, including its memory location.
const auto dataAsFloatObj = anyType.get<float>();
// The types allowed in the template are the set of all allowed attribute types, expressed as regular C++ types
// without the attribute roles. For example float, float[], float[3], float[][3], etc. In most cases trying to access
// the data with an unsupported type will result in a compile error (the exceptions being types that are aliases for
// a supported type, e.g. "using float3 = float[3]"). In fact, since the "NameToken" supported type is an alias for
// another supported type it must be retrieved with a special type set up for that purpose
const auto dataAsToken = anyType.get<OgnToken>();
// The wrapper has a boolean cast operator, which checks to see if the requested type matches the actual resolved
// data type of the attribute. This allows you to make a cascading check for types of attributes you are supporting
if (const auto dataAsFloatObj = anyType.get<float>())
{
    processAsFloat(*dataAsFloatObj);
}
else if (const auto dataAsDoubleObj = anyType.get<double>())
{
    processAsDouble(*dataAsDoubleObj);
}
// In addition to the simple boolean validity test, the wrapper returned will have a few different methods,
// depending on the template data type.
// The dereference operators return references to the actual underlying attribute data (on the CPU - if your
// attribute lives on the GPU you'll get a reference to a pointer to the underlying attribute data, which lives in
// the GPU memory space and cannot be dereferenced on the CPU). Note that the default memory location of a bundled
// attribute is whatever was specified for the bundle itself.
const auto dataAsFloatObj = anyType.get<float>();
float const& floatValueDeref = *dataAsFloatObj;
float const& floatValueFn = dataAsFloatObj();
float const* floatValuePtr = dataAsFloatObj.operator->();
// The same dereference operators work for tuple types as well
const auto dataAsFloat3Obj = anyType.get<float[3]>();
float const (&float3ValueDeref)[3] = *dataAsFloat3Obj;
// The tuple values also give you access to the tuple count and element-wise access
float x = dataAsFloat3Obj[0];
assert( dataAsFloat3Obj.tupleSize() == 3);
// Array data type wrappers dereference to the same array wrappers you get from regular attributes
const auto dataAsFloatArrayObj = anyType.get<float[]>();
for (const auto& floatValue : *dataAsFloatArrayObj) { /* ... */ }
size_t arrayElements = dataAsFloatArrayObj->size();
// For GPU attributes, which do not have the ability to dereference their array memory location, the wrapper instead
// returns a raw pointer to the underlying GPU memory location of the array.
const auto gpuFloatArrayObj = anyType.get<float[]>();
float const ***ptrToRawGpuData = *gpuFloatArrayObj;
// When the node is configured to extract CUDA pointers on the CPU there is one fewer level of indirection for
// arrays as the pointer returned is on the CPU.
const auto gpuFloatArrayObj = anyType.get<float[]>();
float const ***ptrToGpuDataOnCpu = *gpuFloatArrayObj;
float const **ptrToRawGpuData = *ptrToGpuDataOnCpu;
// As with regular array attributes, before writing to elements of an output array attribute you must first resize
// it to have the desired number of elements.
auto outputFloatArrayObj = data.outputs.results().get<float[]>();
outputFloatArrayObj.resize( howManyDoINeed );
// For attributes whose memory space is determined at runtime, or when you want to access attribute data in a different
// memory space than they were originally defined, you can force the retrieved data to be either on the CPU or GPU.
const auto gpuVersionObj = anyType.getGpu<float>();
const auto cpuVersionObj = anyType.getCpu<float>();
// On rare occasions you may need to resolve the attribute's type at runtime, inside a node's compute() function. In
// those cases the runtime attribute data can get out of sync so you need to notify it that a change has been made.
AttributeObj out = db.abi_node().iNode->getAttributeByToken(db.abi_node(), outputs::anyOutput.m_token);
out.iAttribute->setResolvedType(out, someNewType);
anyOutput.reset(db.abi_context(), out.iAttribute->getAttributeDataHandle(out, kAccordingToContextIndex));
// end-extended-attribute-interface-description
//
#endif

#include <omni/graph/core/ogn/TypeConversion.h>
#include <omni/graph/core/ogn/Types.h>
#include <omni/graph/core/ogn/array.h>
#include <omni/graph/core/ogn/string.h>
#include <omni/fabric/Enums.h>

using omni::fabric::PtrToPtrKind;

namespace omni {
namespace graph {
namespace core {
namespace ogn {

// ==============================================================================================================
// House the shared data types that will be used by all data type accessors.
// Not all types are used by all classes, this just provides a single point of type definitions.
template <typename CppType, bool readOnly, eMemoryType MemoryType, PtrToPtrKind GpuPtrType>
struct data_type_traits : attribute_type_traits<CppType>
{
    using parent_t = attribute_type_traits<CppType>;

    // Type that allows switching on memory type
    using isCpu_t = std::integral_constant<bool, MemoryType == kCpu>;

    // Template inheritance isn't smart enough to forward the types and constexprs so manually forward them here
    static constexpr bool isArray = parent_t::isArray;
    static constexpr uint8_t tupleCount = parent_t::tupleCount;
    static constexpr uint8_t arrayDepth = parent_t::arrayDepth;
    static constexpr BaseDataType baseType = parent_t::baseType;
    using data_t = typename parent_t::data_t;

    // The type of attribute handle used to call the ABI functions
    using handle_t = std::conditional_t<readOnly, const ConstAttributeDataHandle, AttributeDataHandle>;

    // The type that will be returned from individual element access (simple or tuple data)
    using element_t = typename std::conditional_t< readOnly, typename parent_t::element_t const, typename parent_t::element_t>;

    // The types used for storage where the constness is hardcoded into the attribute type
    using data_access_t = std::conditional_t<readOnly, data_t const, data_t>;
    // char data maps onto the string wrappers rather than the generic array wrappers
    using array_t = std::conditional_t<
        readOnly,
        std::conditional_t<std::is_same<const char, data_access_t>::value, const_string, const_array<data_access_t>>,
        std::conditional_t<std::is_same<char, data_access_t>::value, string, array<data_access_t>>>;

    // const, non-const, and appropriate const pointers to the Fabric data
    using data_ptr_t = std::conditional_t<isArray, data_t**, data_t*>;
    using data_ptr_const_t = std::conditional_t<isArray, data_t const**, data_t const*>;
    using data_ptr_access_t = std::conditional_t<readOnly, data_ptr_const_t, data_ptr_t>;

    // CPU array data lives in the wrappers, GPU data is raw since it cannot be dereferenced
    using array_data_t = std::conditional_t<MemoryType == kCpu, array_t, data_ptr_access_t>;

    // ==============================================================================================================
    // Simple test to see if the type of data in the template parameter is compatible with a specific Type()
    static bool matchesType(const Type& attributeType)
    {
        return attributeType.baseType == baseType && attributeType.componentCount == tupleCount &&
               attributeType.arrayDepth == arrayDepth;
    }

    // ==============================================================================================================
    // Templated access to getting read-only and writable values, calling the correct ABI functions based on memory location.
    // The trailing tag argument selects the CPU (std::false_type) or GPU (std::true_type) ABI entry point.
    static data_ptr_const_t readOnlyData(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle)
    {
        return _readOnlyData(contextObj, attrHandle, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>());
    }
    static data_ptr_t writableData(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
    {
        return _writableData(contextObj, attrHandle, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>());
    }

    // Calling the right one depending on the handle type
    static data_ptr_const_t data(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle)
    {
        return _readOnlyData(contextObj, attrHandle, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>());
    }
    static data_ptr_t data(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
    {
        return _writableData(contextObj, attrHandle, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>());
    }

    // ==============================================================================================================
    // Retrieving a reference on existing data.
    // If data is not located at the right place (gpu/cpu), returns nullptr
    static void dataReference(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, ConstRawPtr& ref, size_t& size)
    {
        _dataReference(contextObj, attrHandle, ref, size,
                       std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>(),
                       std::conditional_t<readOnly, std::true_type, std::false_type>());
    }
    static void dataReference(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, RawPtr& ref, size_t& size)
    {
        _dataReference(contextObj, attrHandle, ref, size,
                       std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>(),
                       std::conditional_t<readOnly, std::true_type, std::false_type>());
    }

private:
    // These methods could not use the getDataX<> templates due to an oddity in how const is handled in composed typedefs.
    // It's slightly more efficient to build anyway, and since it goes through the ABI it's just as safe
    static data_ptr_const_t _readOnlyData(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, std::true_type)
    {
        // GPU path: fetch the data pointer with the configured pointer-to-pointer kind
        data_ptr_const_t out{ nullptr };
        const void** outPtr = (const void**)(&out);
        contextObj.iAttributeData->getDataRGpuAt(outPtr, contextObj, &attrHandle, 1, GpuPtrType);
        return out;
    }
    static data_ptr_const_t _readOnlyData(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, std::false_type)
    {
        // CPU path
        data_ptr_const_t out{ nullptr };
        const void** outPtr = (const void**)(&out);
        contextObj.iAttributeData->getDataR(outPtr, contextObj, &attrHandle, 1);
        return out;
    }
    static data_ptr_t _writableData(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, std::true_type)
    {
        // GPU path
        data_ptr_t out{ nullptr };
        void** outPtr = (void**)(&out);
        contextObj.iAttributeData->getDataWGpuAt(outPtr, contextObj, &attrHandle, 1, GpuPtrType);
        return out;
    }
    static data_ptr_t _writableData(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, std::false_type)
    {
        // CPU path
        data_ptr_t out{ nullptr };
        void** outPtr = (void**)(&out);
        contextObj.iAttributeData->getDataW(outPtr, contextObj, &attrHandle, 1);
        return out;
    }
    static void _dataReference(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, ConstRawPtr& ref, size_t& size, std::true_type isGpu, std::true_type isRO)
    {
        contextObj.iAttributeData->getDataReferenceRGpuAt(attrHandle, contextObj, GpuPtrType, ref, size);
    }
    static void _dataReference(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, ConstRawPtr& ref, size_t& size, std::false_type isGpu, std::true_type isRO)
    {
        contextObj.iAttributeData->getDataReferenceR(attrHandle, contextObj, ref, size);
    }
    static void _dataReference(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, RawPtr& ref, size_t& size, std::true_type isGpu, std::false_type isRO)
    {
        contextObj.iAttributeData->getDataReferenceWGpuAt(attrHandle, contextObj, GpuPtrType, ref, size);
    }
    static void _dataReference(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, RawPtr& ref, size_t& size, std::false_type isGpu, std::false_type isRO)
    {
        contextObj.iAttributeData->getDataReferenceW(attrHandle, contextObj, ref, size);
    }
};

// ==============================================================================================================
// Simple wrapper to access the actual value with potential conversion on the read one
template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct SimpleDataReadOnly
{
    using data_traits = data_type_traits<CppType, true, MemoryType, GpuPtrType>;
    using return_type = const typename data_traits::data_t;

    typename data_traits::data_ptr_access_t m_value{nullptr};
    Converter<typename data_traits::data_t> m_converter;
    Type const& m_type;   // NOTE(review): held by reference — presumably the referenced Type outlives this wrapper; confirm with callers

    explicit SimpleDataReadOnly(typename data_traits::data_access_t* value, Type const& type)
        : m_value{ value }, m_type{ type }
    {}

    // Dereference goes through the converter so a compatible-but-different stored type is converted on read
    return_type& operator*() const { return *m_converter.convertValue(m_value, m_type); }
    return_type* operator->() const { return m_converter.convertValue(m_value, m_type); }

    // Span over "count" instances of the value; conversion only supports a single instance
    gsl::span<return_type> vectorized(size_t count) const
    {
        if (m_converter.willConvert(m_type))
        {
            if (count != 1)
                return gsl::span<return_type>{};
            return gsl::span<return_type>{ m_converter.convertValue(m_value, m_type), 1 };
        }
        return gsl::span<return_type>{ m_value, count };
    }

    // True when the attribute resolved to the requested type and data is available
    operator bool() const { return m_value; }
};

template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct SimpleDataWritable
{
    using data_traits = data_type_traits<CppType, false, MemoryType, GpuPtrType>;
    using return_type = typename data_traits::data_t;

    typename data_traits::data_ptr_access_t m_value{ nullptr };

    // Writable data is never converted; the Type argument is accepted for interface symmetry and ignored
    SimpleDataWritable(typename data_traits::data_access_t* m_value, Type const&) : m_value{m_value} {}

    return_type& operator*() { return *this->m_value; }
    return_type* operator->() { return this->m_value; }
    gsl::span<return_type> vectorized(size_t count) const { return { m_value, count }; }
    operator bool() const { return m_value; }
};

// ==============================================================================================================
// Tuple data behaves the same as simple data, with the addition of element accessors and a size function.
template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct TupleDataReadOnly : SimpleDataReadOnly<CppType, MemoryType, GpuPtrType>
{
    using data_traits = data_type_traits<CppType, true, MemoryType, GpuPtrType>;

    TupleDataReadOnly(typename data_traits::data_access_t* value, Type const& type)
        : SimpleDataReadOnly<CppType, MemoryType, GpuPtrType>{value, type}
    {}

    // GPU data is passed as a pointer to the tuple so for now there is no need for elementwise access here
    const typename data_traits::element_t& operator[](uint8_t index) const
    {
        static_assert(MemoryType == kCpu, "Cannot access individual tuple elements on GPU data");
        CARB_ASSERT(index < this->tupleSize());
        return (**this)[index];
    }
    uint8_t tupleSize() const { return data_traits::tupleCount; }
};

template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct TupleDataWritable : SimpleDataWritable<CppType, MemoryType, GpuPtrType>
{
    using data_traits = data_type_traits<CppType, false, MemoryType, GpuPtrType>;

    TupleDataWritable(typename data_traits::data_access_t* value, Type const& type)
        : SimpleDataWritable<CppType, MemoryType, GpuPtrType>{value, type}
    {}

    // GPU data is passed as a pointer to the tuple so for now there is no need for elementwise access here
    typename data_traits::element_t& operator[](uint8_t index)
    {
        static_assert(MemoryType == kCpu, "Cannot access individual tuple elements on GPU data");
        CARB_ASSERT(index < this->tupleSize());
        return (**this)[index];
    }
    uint8_t tupleSize() const { return data_traits::tupleCount; }
};

// ==============================================================================================================
// Default version, for CPU
template <typename CppType, bool readOnly, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct ArrayData
{
    using data_traits = data_type_traits<CppType, readOnly, MemoryType, GpuPtrType>;
    using return_type = typename data_traits::array_data_t;
    using handle_type = typename data_traits::handle_t;

    explicit ArrayData(const GraphContextObj& context, handle_type const* handle, bool isValid)
    {
        // An invalid attribute leaves the wrapper unbound; operator bool will then be false
        if (isValid)
        {
            m_arrayData.setContext(context);
            m_arrayData.setHandle(*handle);
        }
    }

    size_t size() const { return m_arrayData.isValid() ? m_arrayData.size() : 0; }

    return_type& operator*() { return m_arrayData; }
    return_type& operator()() { return m_arrayData; }
    return_type* operator->() { return &m_arrayData; }

    // const accessors
    return_type const& operator*() const { return m_arrayData; }
    return_type const& operator()() const { return m_arrayData; }
    return_type const* operator->() const { return &m_arrayData; }

    // Bool operator
    operator bool() const { return m_arrayData.isValid(); }

    // Invalidate
    void invalidate() { m_arrayData.setDirty();}

    GraphContextObj const* context() const { return m_arrayData.context(); }
    handle_type const& handle() const { return m_arrayData.handle(); }

protected:
    return_type m_arrayData;
};

// ==============================================================================================================
// Special version, for GPU
template <typename CppType, bool readOnly, PtrToPtrKind GpuPtrType>
struct ArrayData<CppType, readOnly, kCuda, GpuPtrType>
{
    using data_traits = data_type_traits<CppType, readOnly, kCuda, GpuPtrType>;
    using return_type = typename data_traits::array_data_t;
    using handle_type = typename data_traits::handle_t;
    using this_t = ArrayData<CppType, readOnly, kCuda, GpuPtrType>;

    explicit ArrayData(const GraphContextObj& context, handle_type const* handle, bool isValid)
    {
        // GPU arrays cannot be dereferenced here, so only the context/handle pair is kept
        if (isValid)
        {
            m_ctx = &context;
            m_hdl = handle;
        }
        else
        {
            m_ctx = nullptr;
            m_hdl = nullptr;
        }
    }

    size_t size() const
    {
        size_t count = 0;
        ConstAttributeDataHandle hdl = *m_hdl;
        m_ctx->iAttributeData->getElementCount(&count, *m_ctx, &hdl, 1);
        return count;
    }

    // accessors
    return_type deref()
    {
        // Returns the raw device pointer when a reference is available, null otherwise
        using PtrType = typename std::conditional<readOnly, ConstRawPtr, RawPtr>::type;
        return_type dataPtr{ nullptr };
        size_t size = 0;
        data_traits::dataReference(*m_ctx, *m_hdl, (PtrType&) dataPtr, size);
        return size ? dataPtr : 0;
    }
    return_type operator*() { return data_traits::data(*this->m_ctx, *this->m_hdl); }
    return_type operator()() { return data_traits::data(*this->m_ctx, *this->m_hdl); }

    // const accessors
    return_type const operator*() const { return const_cast<this_t*>(this)->operator*(); }
    return_type const operator()() const { return const_cast<this_t*>(this)->operator()(); }

    operator bool() const { return m_ctx && m_hdl; }

    // Nothing is cached on the GPU side so there is nothing to invalidate
    void invalidate() {}

    GraphContextObj const* context() const { return m_ctx; }
    handle_type const& handle() const { return *m_hdl; }

protected:
    GraphContextObj const* m_ctx;
    handle_type const* m_hdl;
};

template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct ArrayDataReadOnly : ArrayData<CppType, true, MemoryType, GpuPtrType>
{
    ArrayDataReadOnly(const GraphContextObj& context, ConstAttributeDataHandle const* handle, bool isValid)
        : ArrayData<CppType, true, MemoryType, GpuPtrType>{ context, handle, isValid }
    {}
};

template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct ArrayDataWritable : ArrayData<CppType, false, MemoryType, GpuPtrType>
{
    ArrayDataWritable(const GraphContextObj& context, AttributeDataHandle const* handle, bool isValid)
        : ArrayData<CppType, false, MemoryType, GpuPtrType>{context, handle, isValid}
    {}

    // Change the element count through the ABI, then invalidate any cached view of the data
    void resize(size_t newCount)
    {
        auto const& ctx = *this->context();
        const IAttributeData& iData = *(ctx.iAttributeData);
        iData.setElementCount(ctx, this->handle(), newCount);
        this->invalidate();
    }
};

// ==============================================================================================================
template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct TupleArrayDataReadOnly : ArrayDataReadOnly<CppType, MemoryType, GpuPtrType>
{
    using data_traits = data_type_traits<CppType, true, MemoryType, GpuPtrType>;

    TupleArrayDataReadOnly(const GraphContextObj& context, ConstAttributeDataHandle const* handle, bool isValid)
        : ArrayDataReadOnly<CppType, MemoryType, GpuPtrType>{context, handle, isValid}
    {}

    // GPU data is passed as a pointer to the tuple so for now there is no need for element wise access here
    const typename data_traits::data_t& operator[] (uint8_t index) const
    {
        static_assert(MemoryType == kCpu, "Cannot access individual tuple elements on GPU data");
        CARB_ASSERT(index < this->tupleSize());
        return this->m_arrayData[index];
    }
    uint8_t tupleSize() const { return data_traits::tupleCount; }
};

template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct TupleArrayDataWritable : ArrayDataWritable<CppType, MemoryType, GpuPtrType>
{
    using data_traits = data_type_traits<CppType, false, MemoryType, GpuPtrType>;

    TupleArrayDataWritable(const GraphContextObj& context, AttributeDataHandle const* handle, bool isValid)
        : ArrayDataWritable<CppType, MemoryType, GpuPtrType>{context, handle, isValid}
    {}

    // GPU data is passed as a pointer to the tuple so for now there is no need for element wise access here
    typename data_traits::data_t& operator[](uint8_t index)
    {
        static_assert(MemoryType == kCpu, "Cannot access individual tuple elements on GPU data");
        CARB_ASSERT(index < this->tupleSize());
        return this->m_arrayData[index];
    }
    uint8_t tupleSize() const { return
data_traits::tupleCount; } }; // ====================================================================== /** * Class responsible for managing the interaction with an attribute whose data type is determined at runtime. * These attributes may or may not not have a corresponding node attribute object. Those within bundles are virtual * and do not have a concrete attribute. Those with extended types, do. * * It wraps the attribute information in an interface with a more natural interaction than the raw ABI calls. */ template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> class RuntimeAttribute { public : // Make const-ness aware at compile time so that this class be used in const and non-const contexts static constexpr bool readOnly = (AttributeType == ogn::kOgnInput); //! The handle types are not simply "X" and "const X" variations so the type has to be explicitly defined //! for writable (output/state) and non-writable (input) attribute types. using dataHandle_t = typename std::conditional_t<readOnly, ConstAttributeDataHandle, AttributeDataHandle>; private: //! Traits for generic access to the raw memory. 
template <eMemoryType MT> using raw_data_traits_mt = data_type_traits<uint8_t, readOnly, MT, GpuPtrType>; using raw_data_traits = raw_data_traits_mt<MemoryType>; template <typename CppType, eMemoryType AccessorMemoryType, PtrToPtrKind AccessorGpuPtrType = PtrToPtrKind::eNotApplicable> using simpleData_t = std::conditional_t< readOnly, ogn::SimpleDataReadOnly<CppType, AccessorMemoryType, AccessorGpuPtrType>, ogn::SimpleDataWritable<CppType, AccessorMemoryType, AccessorGpuPtrType> >; template <typename CppType, eMemoryType AccessorMemoryType, PtrToPtrKind AccessorGpuPtrType = PtrToPtrKind::eNotApplicable> using tupleData_t = typename std::conditional_t< readOnly, ogn::TupleDataReadOnly<CppType, AccessorMemoryType, AccessorGpuPtrType>, ogn::TupleDataWritable<CppType, AccessorMemoryType, AccessorGpuPtrType> >; template <typename CppType, eMemoryType AccessorMemoryType, PtrToPtrKind AccessorGpuPtrType = PtrToPtrKind::eNotApplicable> using arrayData_t = typename std::conditional_t< readOnly, ogn::ArrayDataReadOnly<CppType, AccessorMemoryType, AccessorGpuPtrType>, ogn::ArrayDataWritable<CppType, AccessorMemoryType, AccessorGpuPtrType> >; template <typename CppType, eMemoryType AccessorMemoryType, PtrToPtrKind AccessorGpuPtrType = PtrToPtrKind::eNotApplicable> using tupleArrayData_t = typename std::conditional_t< readOnly, ogn::TupleArrayDataReadOnly<CppType, AccessorMemoryType, AccessorGpuPtrType>, ogn::TupleArrayDataWritable<CppType, AccessorMemoryType, AccessorGpuPtrType> >; /** * helper that allows to make the proper call depending on the type of the handle */ inline void _moveHdl(const IAttributeData& iData, size_t index, ConstAttributeDataHandle& hdl) const { hdl = iData.moveToAnotherInstanceR(m_context, hdl, (int)index); } inline void _moveHdl(const IAttributeData& iData, size_t index, AttributeDataHandle& hdl) const { hdl = iData.moveToAnotherInstanceW(m_context, hdl, (int)index); } /** * helper that prefetch the data pointer for the attribute */ template 
<eMemoryType MT = MemoryType> inline void _prefetch() const { if (MT != kAny) m_cachedData = raw_data_traits_mt<MT>::data(m_context, m_handle); } GraphContextObj m_context{ 0 }; //!< Evaluation context for which this attribute is valid NameToken m_name; //!< Name by which this attribute is accessed Type m_type; //!< Type information for the actual attribute data Type m_resolvedType; //!< Type information for the attribute interface dataHandle_t m_handle; //!< Handle of the attribute data mutable typename raw_data_traits::data_access_t* m_cachedData{ nullptr }; public: /** * Default constructor will create an invalid attribute */ RuntimeAttribute() : m_type(BaseDataType::eUnknown), m_resolvedType(BaseDataType::eUnknown), m_handle(dataHandle_t::invalidValue()) { } /** * Although the destructor should always be implemented with copy constructors it has no resources to release */ ~RuntimeAttribute() = default; /** * Copy constructor, to allow these objects to be easily passed around. * Only attributes with the same accessibility and memory type should be copied. */ RuntimeAttribute(const RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>& toCopy) : m_context(toCopy.m_context), m_name(toCopy.m_name), m_type(toCopy.m_type), m_resolvedType(toCopy.m_resolvedType), m_handle(toCopy.m_handle) { if (m_handle != dataHandle_t::invalidHandle()) _prefetch<>(); else m_cachedData = nullptr; } /** * Move constructor, to allow these objects to be efficiently passed around * Only attributes with the same accessibility and memory type should be copied. 
*/ RuntimeAttribute(RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>&& toCopy) : m_context(toCopy.m_context), m_name(toCopy.m_name), m_type(toCopy.m_type), m_resolvedType(toCopy.m_resolvedType), m_handle(toCopy.m_handle) { if (m_handle != dataHandle_t::invalidHandle()) _prefetch(); else m_cachedData = nullptr; } /** * Copy assignment, to match the constructor * Only attributes with the same accessibility and memory type should be copied. */ RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>& operator=(const RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>& toCopy) { m_context = toCopy.m_context; m_name = toCopy.m_name; m_type = toCopy.m_type; m_resolvedType = toCopy.m_resolvedType; m_handle = toCopy.m_handle; m_cachedData = toCopy.m_cachedData; return *this; } /** * Move assignment, to match the constructor * Only attributes with the same accessibility and memory type should be copied. */ RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>& operator=(RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>&& toCopy) { m_context = toCopy.m_context; m_name = toCopy.m_name; m_type = toCopy.m_type; m_resolvedType = toCopy.m_resolvedType; m_handle = toCopy.m_handle; m_cachedData = toCopy.m_cachedData; return *this; } /** * Standard constructor, extracts the attribute information if it is valid. 
* * @param[in] context Evaluaton context of this attribute * @param[in] handle Handle to the attribute * @param[in] resolvedType The type exposed to end user (conversion will happen if different from the actual real type) */ RuntimeAttribute(const GraphContextObj& context, dataHandle_t& handle, Type const& resolvedType = { BaseDataType::eUnknown }) : m_context(context), m_type(BaseDataType::eUnknown), m_resolvedType(resolvedType), m_handle(handle) { if (m_handle.isValid()) { m_name = context.iAttributeData->getName(context, handle); m_type = context.iAttributeData->getType(context, handle); if (m_resolvedType.baseType == BaseDataType::eUnknown) m_resolvedType = m_type; _prefetch(); } else { m_cachedData = nullptr; } } /** * Standard constructor, extracts the attribute information if it is valid. * * @param[in] context Evaluaton context of this attribute * @param[in] handle Handle to the attribute * @param[in] @param[in] resolvedType The type exposed to end user (conversion will happen if different from the actual real type) */ RuntimeAttribute(GraphContextObj&& context, dataHandle_t&& handle, Type const& resolvedType = { BaseDataType::eUnknown }) : m_context(context), m_type(BaseDataType::eUnknown), m_resolvedType(resolvedType), m_handle(handle) { if (m_handle.isValid()) { m_name = context.iAttributeData->getName(context, handle); m_type = context.iAttributeData->getType(context, handle); if (m_resolvedType.baseType == BaseDataType::eUnknown) m_resolvedType = m_type; _prefetch(); } else { m_cachedData = nullptr; } } /** * Copy the data from another runtime attribute into this one (only valid for non-const objects) * * @param[in] rhs Runtime attribute being copied */ template <typename SourceAttribute> void copyData(const SourceAttribute& rhs) { static_assert(! 
readOnly, "Attribute data can only be copied to writable attributes"); ConstAttributeDataHandle constSrcHandle(rhs.abi_handle()); m_context.iAttributeData->copyData(m_handle, m_context, constSrcHandle); } /** * Set the context and attribute handle for evaluation. Delayed so that the contents can be created * early with just-in-time initialization. * * @param[in] context Evaluation context to use when extracting information * @param[in] handle Handle to the attribute being wrapped * @param[in] attr The attribute object represented by this wrapper */ void reset(const GraphContextObj& context, const dataHandle_t& handle, const AttributeObj& attr) { m_handle = handle; m_context = context; if (m_handle.isValid()) { m_name = context.iAttributeData->getName(context, handle); m_type = context.iAttributeData->getType(context, handle); m_resolvedType = attr.iAttribute->getResolvedType(attr); _prefetch(); } else { m_type = Type{BaseDataType::eUnknown}; m_resolvedType = Type{ BaseDataType::eUnknown }; m_name = fabric::kUninitializedToken; m_cachedData = nullptr; } } /** * @return true if the handle and context point to valid data within the fabric */ bool isValid() const { return m_handle.isValid(); } /** * @return Name by which this attribute's data is referenced */ const NameToken& name() const { return m_name; } /** * @return Type information for this attribute's data */ const Type& type() const { return m_resolvedType.baseType != BaseDataType ::eUnknown ? m_resolvedType : m_type; } /** * @return True if the data can be accessed in a vectorized manner for this attribute */ const bool canVectorize() const { //attrib needs to exists, and to be resolved as the the same type (ie. 
no auto conversion) //we don't care about role return m_type.baseType != BaseDataType::eUnknown && m_type.baseType == m_resolvedType.baseType && m_type.componentCount == m_resolvedType.componentCount && m_type.arrayDepth == m_resolvedType.arrayDepth; } /** * @return The standardized type name for this attribute's data */ std::string typeName() const { return getOgnTypeName(type()); } /** * @return True if the attribute has a fully resolved type */ bool resolved() const { return m_type.baseType != BaseDataType::eUnknown; } /** * @return Raw attribute data handle to use for direct ABI manipulation */ dataHandle_t abi_handle() const { return m_handle; } /** * In vectorized context, make this attribute point to another instance * @param[in] offset The distance at which the target instance is located relative to the currently pointed one */ void adjustHandle(size_t offset) { const IAttributeData& iData = *m_context.iAttributeData; _moveHdl(iData, offset, m_handle); _prefetch(); } /** * @return Raw graph context assotiated to the handle to use for direct ABI manipulation */ GraphContextObj const& abi_context() const { return m_context; } /** * @return the number of elements in the array, or 1 if it is not an array type */ size_t size() const { size_t count{ 1 }; if (m_type.arrayDepth > 0) { ConstAttributeDataHandle constHandle{ m_handle }; // getElementCount requires the Const version m_context.iAttributeData->getElementCount(&count, m_context, &constHandle, 1); } return count; } // ------------------------------------------------------------------------------------------------------------ // Support for the various methods to retrieve a generic value type as get<TYPE>(). // // The details in the template and traits information is intended to make access to the data is generic as possible // given the information provided by the data types and this attribute's class members. 
// // Almost all of the time you will access what appears to be a single templated method as this: // // auto dataAccessor = thisAttribute.get<DATA_TYPE>(); // if (dataAccessor.isValid()) ... // The data is the right type, and is valid // // e.g. auto intData = intAttribute.get<int>(); // // This hardcodes the compile time information about whether the attribute was read-only, what type of data it // accepts, and the memory location, into what appears to be a single accessor (though behind the scenes it is // actually a small set of them that provide appropriate access points for the type of data). // // For special attributes who have their memory location designated as "any", the decision of memory location is // made at access time so two variations of the above method are available which explicitly get accessors on either // the CPU or GPU memory locations: // // auto dataAccessorCpu = thisAttribute.getCpu<DATA_TYPE>(); // auto dataAccessorGpu = thisAttribute.getGpu<DATA_TYPE>(); // // Note: Use only one of the two memory location accessors as accessing one type will often invalidate the other // type, potentially causing excessive slow copying of data. // template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> const simpleData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() const { using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; using data_ptr_access_t = typename data_traits::data_ptr_access_t; return simpleData_t<POD, GetAtMemoryType, GetAtGpuPtrType>( data_traits::matchesType(m_resolvedType) ? 
data_ptr_access_t(m_cachedData) : nullptr, m_type); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> simpleData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() { static_assert(! readOnly, "non-const get() can only be called on output attributes"); using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; using data_ptr_access_t = typename data_traits::data_ptr_access_t; return simpleData_t<POD, GetAtMemoryType, GetAtGpuPtrType>( data_traits::matchesType(m_type) ? data_ptr_access_t(m_cachedData) : nullptr, m_type); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> const tupleData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() const { using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; using data_ptr_access_t = typename data_traits::data_ptr_access_t; return tupleData_t<POD, GetAtMemoryType, GetAtGpuPtrType>( data_traits::matchesType(m_resolvedType) ? data_ptr_access_t(m_cachedData) : nullptr, m_type); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> tupleData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() { static_assert(! readOnly, "non-const get() can only be called on output attributes"); using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; using data_ptr_access_t = typename data_traits::data_ptr_access_t; return tupleData_t<POD, GetAtMemoryType, GetAtGpuPtrType>( data_traits::matchesType(m_type) ? 
data_ptr_access_t(m_cachedData) : nullptr, m_type); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> const arrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() const { using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; return arrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType>(m_context, &m_handle, data_traits::matchesType(m_type)); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> arrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() { static_assert(! readOnly, "non-const get() can only be called on output attributes"); using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; return arrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType>(m_context, &m_handle, data_traits::matchesType(m_type)); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> const tupleArrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() const { using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; return tupleArrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType>(m_context, &m_handle, data_traits::matchesType(m_type)); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> tupleArrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() { static_assert(! 
readOnly, "non-const get() can only be called on output attributes"); using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; return tupleArrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType>(m_context, &m_handle, data_traits::matchesType(m_type)); } // ------------------------------------------------------------------------------------------------------------ // Variations of the get<>() functions that force either CPU or GPU memory location. These are used when the // memory location of the attributes was set to "any", meaning they decide CPU or GPU at runtime, though there's // no reason they can't be used for explicit memory locations either. template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> const simpleData_t<POD, MemoryType, GpuPtrType> get() const { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> simpleData_t<POD, MemoryType, GpuPtrType> get() { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> const tupleData_t<POD, MemoryType, GpuPtrType> get() const { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> tupleData_t<POD, MemoryType, GpuPtrType> get() { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename 
std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> const arrayData_t<POD, MemoryType, GpuPtrType> get() const { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> arrayData_t<POD, MemoryType, GpuPtrType> get() { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> const tupleArrayData_t<POD, MemoryType, GpuPtrType> get() const { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> tupleArrayData_t<POD, MemoryType, GpuPtrType> get() { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } // -------------------------------------------------------------------------------------------------------------- template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> const simpleData_t<POD, kCpu> getCpu() const { _prefetch<kCpu>(); return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> simpleData_t<POD, kCpu> getCpu() { _prefetch<kCpu>(); return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> const tupleData_t<POD, kCpu> getCpu() const { _prefetch<kCpu>(); return getAt<POD, kCpu>(); } template <typename POD, typename 
std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> tupleData_t<POD, kCpu> getCpu() { _prefetch<kCpu>(); return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> const arrayData_t<POD, kCpu> getCpu() const { return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> arrayData_t<POD, kCpu> getCpu() { return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> const tupleArrayData_t<POD, kCpu> getCpu() const { return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> tupleArrayData_t<POD, kCpu> getCpu() { return getAt<POD, kCpu>(); } // -------------------------------------------------------------------------------------------------------------- template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> const simpleData_t<POD, kCuda, GpuPtrType> getGpu() const { _prefetch<kCuda>(); return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> simpleData_t<POD, kCuda, GpuPtrType> getGpu() { _prefetch<kCuda>(); return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> const tupleData_t<POD, kCuda, GpuPtrType> getGpu() const { _prefetch<kCuda>(); return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> tupleData_t<POD, kCuda, GpuPtrType> getGpu() { _prefetch<kCuda>(); return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> const arrayData_t<POD, kCuda, GpuPtrType> getGpu() const { return 
getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> arrayData_t<POD, kCuda, GpuPtrType> getGpu() { return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> const tupleArrayData_t<POD, kCuda, GpuPtrType> getGpu() const { return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> tupleArrayData_t<POD, kCuda, GpuPtrType> getGpu() { return getAt<POD, kCuda, GpuPtrType>(); } // -------------------------------------------------------------------------------------------------------------- // Raw data access method, which returns size and pointer but ignores type information. Its usage should be rare, // but it is useful to have it available for those times. template <eMemoryType RawMemoryType = MemoryType, PtrToPtrKind RawGpuPtrType = PtrToPtrKind::eNotApplicable> void rawData(ConstRawPtr& ptr, size_t& size) const { static_assert(RawMemoryType != kAny, "Cannot access raw data on an attribute with runtime memory location"); using data_traits = data_type_traits<uint8_t, readOnly, RawMemoryType, RawGpuPtrType>; data_traits::dataReference(m_context, m_handle, ptr, size); } template <eMemoryType RawMemoryType = MemoryType, PtrToPtrKind RawGpuPtrType = PtrToPtrKind::eNotApplicable> void rawData(RawPtr& ptr, size_t& size) { static_assert(RawMemoryType != kAny, "Cannot access raw data on an attribute with runtime memory location"); static_assert(! 
readOnly, "Cannot access writable raw data on a read-only attribute"); using data_traits = data_type_traits<uint8_t, readOnly, RawMemoryType, RawGpuPtrType>; data_traits::dataReference(m_context, m_handle, ptr, size); } }; /* Runtime Attribute type traits */ template<class T> struct is_runtime_data : std::false_type {}; template<class T, bool ReadOnly, eMemoryType MemoryType> struct is_runtime_data<ArrayData<T, ReadOnly, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<SimpleDataReadOnly<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<SimpleDataWritable<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<TupleDataReadOnly<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<TupleDataWritable<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<ArrayDataReadOnly<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<ArrayDataWritable<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<TupleArrayDataReadOnly<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<TupleArrayDataWritable<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/TypeTraits.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/graph/core/Type.h>
#include <omni/graph/core/ogn/Types.h>
#include <omni/graph/core/TemplateUtils.h>

namespace omni {
namespace graph {
namespace core {
namespace ogn {

// Helper that provides a templated conversion from C++ simple data type to the base data type enum in core::Type.
// Unspecialized types map to eUnknown.
template <typename CppType> struct attribute_base_t { static constexpr BaseDataType value = BaseDataType::eUnknown; };
template <> struct attribute_base_t<bool> { static constexpr BaseDataType value = BaseDataType::eBool; };
template <> struct attribute_base_t<double> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<float> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<int> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<int64_t> { static constexpr BaseDataType value = BaseDataType::eInt64; };
template <> struct attribute_base_t<Token> { static constexpr BaseDataType value = BaseDataType::eToken; };
template <> struct attribute_base_t<NameToken> { static constexpr BaseDataType value = BaseDataType::eToken; };
template <> struct attribute_base_t<Path> { static constexpr BaseDataType value = BaseDataType::eRelationship; };
template <> struct attribute_base_t<TargetPath> { static constexpr BaseDataType value = BaseDataType::eRelationship; };
template <> struct attribute_base_t<uint32_t> { static constexpr BaseDataType value = BaseDataType::eUInt; };
template <> struct attribute_base_t<uint64_t> { static constexpr BaseDataType value = BaseDataType::eUInt64; };
template <> struct attribute_base_t<uint8_t> { static constexpr BaseDataType value = BaseDataType::eUChar; };
template <> struct attribute_base_t<char> { static constexpr BaseDataType value = BaseDataType::eUChar; }; // char is used for string, but implemented as uchar

// carb base types: tuple types map to the base type of their components.
template <> struct attribute_base_t<carb::Float2> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<carb::Float3> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<carb::Float4> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<carb::Double2> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<carb::Double3> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<carb::Double4> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<carb::Int2> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<carb::Int3> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<carb::Int4> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<carb::Uint2> { static constexpr BaseDataType value = BaseDataType::eUInt; };
template <> struct attribute_base_t<carb::Uint3> { static constexpr BaseDataType value = BaseDataType::eUInt; };
template <> struct attribute_base_t<carb::Uint4> { static constexpr BaseDataType value = BaseDataType::eUInt; };

// Helper struct to convert at compile time from BaseDataType to corresponding cpp type
// (the inverse of attribute_base_t for the scalar types; unhandled enum values map to void).
template <BaseDataType eBaseType> struct attribute_t { using type = void; };
template <> struct attribute_t<BaseDataType::eBool> { using type = bool; };
template <> struct attribute_t<BaseDataType::eDouble> { using type = double; };
template <> struct attribute_t<BaseDataType::eFloat> { using type = float; };
template <> struct attribute_t<BaseDataType::eInt> { using type = int; };
template <> struct attribute_t<BaseDataType::eInt64> { using type = int64_t; };
template <> struct attribute_t<BaseDataType::eToken> { using type = Token; };
template <> struct attribute_t<BaseDataType::eUInt> { using type = uint32_t; };
template <> struct attribute_t<BaseDataType::eUInt64> { using type = uint64_t; };
template <> struct attribute_t<BaseDataType::eUChar> { using type = uint8_t; };

// Templated conversions defining attribute traits given the actual C++ data types they implement.
// The role does not enter in to this conversion as it is an interpretation of a data type, not a separate data type.
//
// Constants:
//     isArray     Boolean indicating if the data type includes an array of variable length
//     tupleCount  Number of tuple values in the data type (1 for a simple value)
//     baseType    BaseDataType enum matching the data's unencumbered type
// Types:
//     actual_t    Actual data type to be handled (managing types that resolve to the same POD)
//     element_t   Data type for the unencumbered value (e.g. float for a float[][3])
//     data_t      Data type for a single value (e.g. float for float/float[] but float[3] for float[3]/float[][3])
//
// The comments above the definitions give examples of how the values are set for representative template types
//
// Note: The pxr GfVec/GfMatrix types are not included here for simplicity, though they can be defined in a separate
// file for those that wish to use them by specializing the attribute_type_traits struct.
template <typename CppType>
struct attribute_type_traits
{
    // float/float[3] -> false, float[]/float[][3] -> true
    static constexpr bool isArray = !is_bounded_array<CppType>::value && std::is_array<CppType>::value;
    static constexpr uint8_t arrayDepth = isArray ? 1 : 0;

    // Flags that break apart the data type into the four main mutually-exclusive categories used as accessors
    static constexpr bool isSimpleType = !is_bounded_array<CppType>::value && !std::is_array<CppType>::value;
    static constexpr bool isTupleType = is_bounded_array<CppType>::value;
    static constexpr bool isArrayType = std::is_array<CppType>::value && !is_bounded_array<CppType>::value &&
                                        !is_bounded_array<std::remove_extent_t<CppType>>::value;
    static constexpr bool isTupleArrayType = std::is_array<CppType>::value && !is_bounded_array<CppType>::value &&
                                             is_bounded_array<std::remove_extent_t<CppType>>::value;

    // Get the actual data type this class references. This is needed due to the fact that our token implementation,
    // NameToken, is typedefed to uint64_t, making it indistinguishable from a regular uint64_t to the compiler. By
    // passing ogn::Token/ogn::Token[] instead, the ambiguity can be resolved and the actual type deduced. (This
    // wouldn't quite work as-is if it supported tuples, but as it doesn't the extra complexity can be omitted.)
    using actual_t = std::conditional_t<std::is_same<std::remove_all_extents_t<CppType>, ogn::Token>::value,
                                        std::conditional_t<isArray, NameToken[], NameToken>,
                                        CppType
    >;

    // float/float[3]/float[]/float[][3] -> float
    using element_t = std::remove_const_t<std::remove_all_extents_t<actual_t>>;

    // float/float[]/float[3]/float[][3] -> BaseDataType::eFloat
    static constexpr BaseDataType baseType = attribute_base_t<element_t>::value;

    // float/float[] -> float, float[3]/float[][3] -> float[3]
    using data_t = std::conditional_t<isArray, std::remove_extent_t<actual_t>, actual_t>;

    // float/float[] -> 1, float[3]/float[][3] -> 3
    static constexpr int tupleCount = std::is_array<data_t>::value ? std::extent<data_t, 0>::value : 1;
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/TypeConversion.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/ogn/TypeTraits.h> namespace omni { namespace graph { namespace core { namespace ogn { // Utility struct to implement conversion between 2 BaseDataType // Note: simple runtime dispatch on the source type (dst type must be known at compile time) static constexpr BaseDataType kLastBaseType = BaseDataType::eToken; template <typename DST_TYPE, BaseDataType BASE_SRC_TYPE = kLastBaseType> struct BaseTypeConversion { using next = BaseTypeConversion<DST_TYPE, (BaseDataType)((uint8_t)BASE_SRC_TYPE - 1)>; static int canConvertFrom(BaseDataType srcType) { if (srcType == BASE_SRC_TYPE) return false; return next::canConvertFrom(srcType); } static bool convert(DST_TYPE* dst, void const* src, BaseDataType srcType, size_t count) { if (srcType == BASE_SRC_TYPE) return false; return next::convert(dst, src, srcType, count); } }; template <typename T> struct BaseTypeConversion<T, BaseDataType::eUnknown> { static constexpr int canConvertFrom(BaseDataType) { return 0; } static bool convert(T*, void const*, BaseDataType, size_t) { return false; } }; template<typename T, BaseDataType eDataType> struct BaseTypeConversionBase { using next = BaseTypeConversion<T, (BaseDataType)((uint8_t)eDataType - 1)>; static bool canConvertFromBase(BaseDataType srcType) { if (eDataType == srcType) return true; return false; } template<typename FUNC> static bool convertBase(T* dst, void const* srcBuffer, BaseDataType srcType, size_t count, FUNC const& func) { if (eDataType == srcType) { using srcType = typename attribute_t<eDataType>::type; 
srcType const* srcPtr = (srcType const*)srcBuffer; while (count--) func(*dst++, *srcPtr++); return true; } return next::convert(dst, srcBuffer, srcType, count); } }; #define IMPLEMENT_BASE_TYPE_CONVERSION(dstType, srcBaseType, func)\ template <> struct BaseTypeConversion<dstType, srcBaseType> : public BaseTypeConversionBase<dstType, srcBaseType> {\ static int canConvertFrom(BaseDataType srcType) {if(canConvertFromBase(srcType)) return __LINE__; return next::canConvertFrom(srcType);}\ static bool convert(dstType* dst, void const* srcBuffer, BaseDataType srcType, size_t count) {return convertBase(dst, srcBuffer, srcType, count, func);}} #define IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(SRC, DST) \ IMPLEMENT_BASE_TYPE_CONVERSION(DST, attribute_base_t<SRC>::value, [](DST& dst, SRC const& src) { dst = (DST)src; }) //! <summary> //! Implements all conversions //! Note: order is important for BOTH source type and destination type //! source type oder : template instantiation needs to happen in order of the enum since higher type ID template //! will call lower ones (so they need to be already defined) //! => ORDER IN THE BASE TYPE ENUM ORDER //! dst type order : conversion precedence will follow order of declaration //! ie. : when several conversions are possible, the one declared before the others will be used //! => ORDER BY "QUALITY" OF THE CONVERSION //! 
//! </summary>
// NOTE: the declaration order below is load-bearing (see the ordering note above) — do not reorder.
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, unsigned int); // signed -> unsigned is allowed by fabric
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, int64_t);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, float);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, double);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, bool);

IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, uint64_t);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, int);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, int64_t);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, float);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, double);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, bool);

IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, uint64_t); // signed -> unsigned is allowed by fabric
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, int);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, double);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, float);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, bool);

IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, int64_t);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, unsigned int);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, int);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, double);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, float);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, bool);

IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(float, double);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(float, int);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(float, int64_t);

IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(double, float);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(double, int64_t);
IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(double, int);

//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//
//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//

// This implements the Double dispatch solve, when none of the type are known at compile time.
// It recurses on the destination type enum; once the destination matches it forwards to the
// single-dispatch BaseTypeConversion chain above to resolve the source type.
template <BaseDataType BASE_DST_TYPE = kLastBaseType>
struct BaseDoubleDispatchTypeConversionTest
{
    using TYPE = typename attribute_t<BASE_DST_TYPE>::type;

    // Returns the conversion priority (0 when no conversion exists).
    static int canConvert(BaseDataType from, BaseDataType to)
    {
        if (BASE_DST_TYPE == to)
            return BaseTypeConversion<TYPE>::canConvertFrom(from);
        return BaseDoubleDispatchTypeConversionTest<(BaseDataType)((uint8_t)BASE_DST_TYPE - 1)>::canConvert(from, to);
    }

    // Converts 'count' elements; returns false when no conversion is registered for the pair.
    static bool convert(void* dstBuffer, BaseDataType dstType, void const* srcBuffer, BaseDataType srcType, size_t count)
    {
        if (BASE_DST_TYPE == dstType)
            return BaseTypeConversion<TYPE>::convert((TYPE*) dstBuffer, srcBuffer, srcType, count);
        return BaseDoubleDispatchTypeConversionTest<(BaseDataType)((uint8_t)BASE_DST_TYPE - 1)>::convert(dstBuffer, dstType, srcBuffer, srcType, count);
    }
};

// Recursion terminator for the destination-type dispatch.
template <>
struct BaseDoubleDispatchTypeConversionTest<BaseDataType::eUnknown>
{
    static int canConvert(BaseDataType from, BaseDataType to) { return 0; }
    static bool convert(void* dst, BaseDataType dstType, void const* srcBuffer, BaseDataType srcType, size_t count) { return false; }
};

//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//
//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//

//Convert at compile time a type to its corresponding BaseDataType
// (strips references, cv-qualifiers and one level of pointer before the lookup)
template<typename T>
constexpr BaseDataType getDataType()
{
    using U = std::decay_t<T>;
    using V = std::remove_pointer_t<U>;
    return attribute_base_t<V>::value;
}

//Returns a positive value if a conversion exists between the 2 provided types, 0 else
// The lower the returned value is, the prefered is the conversion
static inline int isRawDataConvertible(BaseDataType from, BaseDataType to)
{
    return BaseDoubleDispatchTypeConversionTest<>::canConvert(from, to);
}

// Indicate whether, for a given type pair reputed to be convertible, the actual conversion can be bypassed
// (i.e. the two base types share the same binary representation). As written, the only distinct pair
// treated as binary-compatible is eUInt64 <-> eToken; every other case falls through to 'return false'.
static inline bool isRawBinaryDataCompatible(BaseDataType t0, BaseDataType t1)
{
    if (t0 == t1)
        return true;
    if (t0 > t1)
        std::swap(t0, t1);
    switch (t0)//always smaller
    {
    case omni::graph::core::BaseDataType::eUnknown:
        break;
    case omni::graph::core::BaseDataType::eBool:
        break;
    case omni::graph::core::BaseDataType::eUChar:
        break;
    case omni::graph::core::BaseDataType::eInt:
        break;
    case omni::graph::core::BaseDataType::eUInt:
        break;
    case omni::graph::core::BaseDataType::eInt64:
        break;
    case omni::graph::core::BaseDataType::eUInt64:
        // Tokens are stored as 64-bit values, so no actual conversion is needed.
        return t1 == omni::graph::core::BaseDataType::eToken;
        break;
    case omni::graph::core::BaseDataType::eHalf:
        break;
    case omni::graph::core::BaseDataType::eFloat:
        break;
    case omni::graph::core::BaseDataType::eDouble:
        break;
    case omni::graph::core::BaseDataType::eToken:
        break;
    case omni::graph::core::BaseDataType::eRelationship:
        break;
    case omni::graph::core::BaseDataType::eAsset:
        break;
    case omni::graph::core::BaseDataType::ePrim:
        break;
    case omni::graph::core::BaseDataType::eConnection:
        break;
    case omni::graph::core::BaseDataType::eTag:
        break;
    default:
        break;
    }
    return false;
}

//Perform the actual conversion between 2 instantiated values
// (destination type known at compile time, source type dispatched at runtime)
template <typename Dst>
static inline bool doConversion(Dst* dst, void const* src, BaseDataType srcType, size_t count)
{
    static_assert(getDataType<Dst>() != BaseDataType::eUnknown, "");
    CARB_ASSERT(srcType != BaseDataType::eUnknown);
    using UnderlyingType = typename attribute_t<getDataType<Dst>()>::type;//ex: Vec3 -> float
    return BaseTypeConversion<UnderlyingType>::convert((UnderlyingType*)dst, src, srcType, count);
}

// Overload for when neither type is known at compile time (full double dispatch).
static inline bool doConversion(void* dst, BaseDataType dstType, void const* src, BaseDataType srcType, size_t count)
{
    CARB_ASSERT(dstType != BaseDataType::eUnknown);
    CARB_ASSERT(srcType != BaseDataType::eUnknown);
    return BaseDoubleDispatchTypeConversionTest<>::convert(dst, dstType, src, srcType, count);
}

// Helper to determine if the given roles are compatible for matching base type
static inline bool areMatchedBaseTypesRoleCompatible(BaseDataType baseType, AttributeRole srcRole, AttributeRole destRole)
{
    // Execution (which has base type uint) can only be connected
    // to another Execution attribute
    if ((baseType == BaseDataType::eUInt) &&
        ((destRole == AttributeRole::eExecution) != (srcRole == AttributeRole::eExecution)))
    {
        return false;
    }
    else if (baseType == omni::fabric::BaseDataType::eUChar)
    {
        // Path and strings are compatible
        bool isSrcString = (srcRole == AttributeRole::ePath || srcRole == AttributeRole::eText);
        bool isDstString = (destRole == AttributeRole::ePath || destRole == AttributeRole::eText);
        return isSrcString == isDstString;
    }
    return true;
};

// Helper to return true if the given types are compatible
// Returns 1 when compatible (exact match, role-compatible same base type, or convertible), 0 otherwise.
static inline int areTypesCompatible(const Type& srcType, const Type& destType)
{
    if (srcType == destType)
        return 1;
    // They aren't exactly the same, but check for signed/unsigned compatibility, which FC can support
    if (srcType.arrayDepth != destType.arrayDepth)
        return 0;
    if (srcType.componentCount != destType.componentCount)
        return 0;
    // If base types match, we are compatible with a possible exception for the role
    if (srcType.baseType == destType.baseType)
        return areMatchedBaseTypesRoleCompatible(srcType.baseType, srcType.role, destType.role) ? 1 : 0;
    // Arrays are not convertible
    // but arrays of int(64) signed <-> unsigned are through fabric
    if (srcType.arrayDepth)
    {
        return ((srcType.baseType == BaseDataType::eInt || srcType.baseType == BaseDataType::eUInt) &&
                (destType.baseType == BaseDataType::eInt || destType.baseType == BaseDataType::eUInt)) ||
               ((srcType.baseType == BaseDataType::eInt64 || srcType.baseType == BaseDataType::eUInt64) &&
                (destType.baseType == BaseDataType::eInt64 || destType.baseType == BaseDataType::eUInt64)) ? 1 : 0;
    }
    return isRawDataConvertible(srcType.baseType, destType.baseType);
};

// Helper to return if there is any compatibility between two type groups
// (returns true as soon as any (src, dst) pair is compatible; declared int but used as a boolean)
static inline int areAnyTypesCompatible(const std::vector<Type>& srcTypes, const std::vector<Type>& destTypes)
{
    for (const auto& src: srcTypes)
    {
        for (const auto& dst: destTypes)
        {
            if (areTypesCompatible(src, dst))
                return true;
        }
    }
    return false;
}

//Small struct helper that:
// - hold the converted value so a reference can be returned
// - specialize to an empty pass-through if no conversion exists
template <typename DataType, BaseDataType eType = getDataType<DataType>()>
struct Converter
{
private:
    using Data = typename std::decay<DataType>::type;
    Data convertedValue{};

public:
    Converter(){}

    // Returns a pointer to the converted value when a conversion is needed, or the original
    // pointer unchanged when the binary representations are compatible.
    // Converts 'componentCount' elements (i.e. each tuple component of a single value).
    // NOTE(review): this is a const member writing into the non-mutable 'convertedValue' —
    // the constness is stripped inside doConversion by a C-style cast; looks like the member
    // should be 'mutable' (or the method non-const) — confirm against callers.
    DataType const* convertValue(DataType const* originalData, Type const& originalDataType) const
    {
        if (willConvert(originalDataType))
        {
            doConversion(&convertedValue, (void*)originalData, originalDataType.baseType, originalDataType.componentCount);
            return (DataType const*)&convertedValue;
        }
        return originalData;
    }

    // True when an actual element-wise conversion is required (binary layouts differ).
    bool willConvert(Type const& originalDataType) const
    {
        BaseDataType constexpr bdt = getDataType<DataType>();
        return !isRawBinaryDataCompatible(originalDataType.baseType, bdt);
    }
};

// Pass-through specialization used when DataType has no known BaseDataType mapping.
template <typename DataType>
struct Converter<DataType, BaseDataType::eUnknown>
{
    inline DataType const* convertValue(DataType const* originalData, Type const&) const { return originalData; }
    bool willConvert(Type const& originalDataType) const { return false; }
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/Bundle.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ================================================================================================================= // This file contains interface classes which wrap attribute bundles in the OGN database for ease of use // // BundleContents Accessor to get at the attributes inside the bundle // BundleAttribute Access to the bundle attribute, with appropriate read/write abilities depending on port type // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= #ifdef THIS_IS_INCLUDED_IN_THE_DOCUMENTATION // The information bracketed here with begin/end describes the interface that is recommended for use with bundled // attributes. The documentation uses these markers to perform a literal include of this code into the docs so that // it can be the single source of truth. Note that the interface described here is not the complete set of C++ // functions available, merely the ones that make sense for the user to access when dealing with bundles. // begin-bundle-interface-description // A bundle can be described as an opaque collection of attributes that travel together through the graph, whose // contents and types can be introspected in order to determine how to deal with them. This section describes how // the typical node will interface with the bundle content access. 
Use of the attributes within the bundles is the // same as for the extended type attributes, described with their access methods. // // An important note regarding GPU bundles is that the bundle itself always lives on the CPU, specifying a memory // space of "GPU/CUDA" for the bundle actually means that the default location of the attributes it contains will // be on the GPU. // // The main bundle is extracted the same as any other attribute, by referencing its generated database location. // For this example the bundle will be called "color" and it will have members that could either be the set // ("r", "g", "b", "a") or the set ("c", "m", "y", "k") with the obvious implications of implied color space. // // The bundle itself has a path to which it refers; normally unnecessary to use but helpful for debugging std::cout << "The color bundle came from " << db.inputs.color.path() << std::endl; // As with other attribute types you can get an accessor to the bundle: const auto& colorBundle = db.inputs.color(); // The accessor can determine if it points to valid data const bool validColor = colorBundle.isValid(); // It can be queried for the number of attributes it holds auto bundleAttributeCount = colorBundle.size(); // It can have its contents iterated over for (const auto& bundledAttribute : colorBundle) { /* ... */ } // It can be queried for an attribute in it with a specific name auto bundledAttribute = colorBundle.attributeByName(db.tokens.red); // And on the rare occasion when it is necessary, it can access the low level IBundle interface or ABI handle of the bundle's data // to make direct ABI calls on it. (This is discouraged as it may bypass some important state updates.) const auto& bundleHandle = colorBundle.abi_bundleHandle(); // *** The rest of these methods are for output bundles only, as they change the makeup of the bundle // It can be assigned to an output bundle, which merely transfers ownership of the bundle. 
// As in all C++ it's important to make the distinction between assignment and merely obtaining a reference auto& computedColorBundle = db.outputs.computedColorBundle(); // No copy, just assignment of a reference object computedColorBundle = colorBundle; // Copy the input bundle to the output bundle // It can have its contents (i.e. attribute membership) cleared computedColorBundle.clear(); // It can insert a new bundle, without replacing its current contents (with the caveat that all attribute names // in the current and inserted bundle must be unique) computedColorBundle.insertBundle(colorBundle); // It can have a single attribute from another bundle inserted into its current list, like if you don't want // the transparency value in your output color computedColorBundle.clear(); computedColorBundle.insertAttribute(colorBundle.attributeByName(db.tokens.red)); computedColorBundle.insertAttribute(colorBundle.attributeByName(db.tokens.green)); computedColorBundle.insertAttribute(colorBundle.attributeByName(db.tokens.blue)); // It can add a brand new attribute with a specific type and name namespace og = omni::graph::core; og::Type floatType(og::BaseDataType::eFLOAT); computedColorBundle.addAttribute(db.tokens.opacity, floatType); // If you are adding an array attribute you can set its initial element count with the same call og::Type boolArrayType(og::BaseDataType::eBOOLEAN, 1, 1); computedColorBundle.addAttribute(db.tokens.bits, boolArrayType, 32); // If you want to remove an attribute from a bundle you only need its name computedColorBundle.removeAttribute(db.tokens.bits); // end-bundle-interface-description #endif #include <omni/graph/core/ogn/RuntimeAttribute.h> #include <omni/graph/core/Type.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/IBundle.h> #include <omni/graph/core/ComputeGraph.h> #include <omni/graph/core/IBundleChanges.h> #include <carb/InterfaceUtils.h> namespace omni { namespace graph { namespace core { namespace ogn { // 
// ======================================================================
/**
 * Class responsible for managing the interaction with bundles of attributes.
 * It wraps the bundle in an interface with a more natural interaction than the raw ABI calls.
 *
 * <AttributeType> How the attribute is interpreted - input, output, or state value
 * <MemoryType>    where the memory for the attributes in this bundle will live (CPU, GPU, or decided at runtime)
 * <GpuPtrType>    where the pointer to array attributes in this bundle will live (CPU or GPU, for GPU data only)
 */
template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
class BundleContents
{
    //! Aliases used during overload resolution to differentiate between read-only and read-write.
    using roTag_t = std::true_type;
    using rwTag_t = std::false_type;

    //! The writability of a bundle will determine what kinds of operations can be performed on it
    static constexpr bool readOnly = (AttributeType == kOgnInput);
    using readOnly_t = std::conditional_t<AttributeType == kOgnInput, roTag_t, rwTag_t>;

    //! By defining the bundle type based on attribute type duplication of code in this class can be avoided
    using bundleHandle_t = std::conditional_t<readOnly, ConstBundleHandle, BundleHandle>;

    //! By defining the interface type based on attribute type duplication of code in this class can be avoided
    using bundleInterface_t = std::conditional_t<readOnly, IConstBundle2, IBundle2>;

    //! The handle types are not simply "X" and "const X" variations so the type has to be explicitly defined
    //! for writable (output/state) and non-writable (input) attribute types.
    using dataHandle_t = std::conditional_t<readOnly, ConstAttributeDataHandle, AttributeDataHandle>;

    //! Short form to reduce line length
    using runtime_t = RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>;
    using bundleInterfacePtr = omni::core::ObjectPtr<bundleInterface_t>;

    bundleInterfacePtr m_bundlePtr;
    runtime_t m_invalid;                            //!< Special object representing invalid data (appears unused in this class body)
    mutable gsl::span<runtime_t> m_iterableArray;   //!< Iterator wrapper (lazily built attribute cache)

    // ================================================================================
    // Functions supporting both read-only and writable versions, necessitated by the different function calls
    // and argument types used for both (i.e. you can't just do a const_cast for these). Ideally they would be
    // broken out into utility functions and shared everywhere.
    //
    // They are selected by calling them using a first argument of "readOnly_t()", which will use overloading to
    // select the proper version. (roTag_t for read-only versions, rwTag_t for writable versions)

    /**
     * Extract the interface for an attribute in the bundle with the given name.
     *
     * @param name Token representing the name of the attribute in the bundle
     * @return Bundle member from which attribute information can be extracted (invalid if name was not found)
     */
    dataHandle_t extractNamedAttribute(rwTag_t, NameToken const& name) const
    {
        return m_bundlePtr->getAttributeByName(name);
    }
    dataHandle_t extractNamedAttribute(roTag_t, NameToken const& name) const
    {
        return m_bundlePtr->getConstAttributeByName(name);
    }

    /**
     * Get the list of attribute handles present on the bundle.
     *
     * @param Type representing writable (rwTag_t) or read-only (roTag_t) data
     * @param allHandles Pointer to array of handles that were extracted
     * @param count The size of the provided pointer array (in pointer count)
     */
    void extractHandles(rwTag_t, dataHandle_t* allHandles, size_t count) const
    {
        m_bundlePtr->getAttributes(allHandles, count);
    }
    void extractHandles(roTag_t, dataHandle_t* allHandles, size_t count) const
    {
        m_bundlePtr->getConstAttributes(allHandles, count);
    }

    /**
     * Construct bundle interface based on provided context and bundle handle.
     *
     * @param context Evaluation context.
     * @param handle Bundle handle.
     */
    bundleInterfacePtr getInterface(rwTag_t, GraphContextObj const& context, bundleHandle_t handle) const
    {
        return getBundleFactoryInterface()->getBundle(context, handle);
    }
    bundleInterfacePtr getInterface(roTag_t, GraphContextObj const& context, bundleHandle_t handle) const
    {
        return getBundleFactoryInterface()->getConstBundle(context, handle);
    }

    /**
     * Construct bundle interface based on provided context and bundle path.
     * Returns an empty pointer when the IBundleFactory2 interface is not available.
     *
     * @param context Evaluation context.
     * @param path Bundle path.
     */
    bundleInterfacePtr getInterface(rwTag_t, GraphContextObj const& context, omni::fabric::PathC path) const
    {
        auto factory = omni::core::cast<IBundleFactory2>(getBundleFactoryInterface());
        return factory ? factory->getBundleFromPath(context, path) : bundleInterfacePtr{};
    }
    bundleInterfacePtr getInterface(roTag_t, GraphContextObj const& context, omni::fabric::PathC path) const
    {
        auto factory = omni::core::cast<IBundleFactory2>(getBundleFactoryInterface());
        return factory ? factory->getConstBundleFromPath(context, path) : bundleInterfacePtr{};
    }

    /**
     * Get read-only or read-write handle depending on writability permissions of this interface.
     */
    bundleHandle_t getBundleHandle(rwTag_t) const
    {
        return m_bundlePtr->getHandle();
    }
    bundleHandle_t getBundleHandle(roTag_t) const
    {
        return m_bundlePtr->getConstHandle();
    }

    //! Release the cached attribute wrapper array (safe on an empty cache: delete[] nullptr is a no-op).
    void clearAttributeCache() const
    {
        delete[] m_iterableArray.data();
        m_iterableArray = gsl::span<runtime_t>{};
    }

    //! Rebuild the cached runtime_t wrappers for the bundle's current attributes.
    void updateAttributeCache() const
    {
        // Only reallocate the bundle members if the size changed. If it didn't then the
        // in-place constructor will put the correct data in place.
        size_t newSize = attributeCount();
        if (!m_iterableArray.empty() && (newSize != m_iterableArray.size()))
        {
            clearAttributeCache();
        }
        if (m_iterableArray.empty() && (newSize > 0))
        {
            m_iterableArray = gsl::span<runtime_t>{ new runtime_t[newSize], newSize };
        }
        if (!m_iterableArray.empty())
        {
            auto context = m_bundlePtr->getContext();
            // alloca: handle scratch space lives on the stack for the duration of this call only
            dataHandle_t* allHandles = reinterpret_cast<dataHandle_t*>(alloca(newSize * sizeof(dataHandle_t)));
            extractHandles(readOnly_t(), allHandles, newSize);
            for (size_t i = 0; i < newSize; ++i)
            {
                // NOTE(review): placement-new over elements already constructed by new[] without
                // destroying them first — relies on runtime_t tolerating in-place reconstruction
                // (per the comment above); confirm runtime_t holds no owning resources.
                new (&m_iterableArray[i]) runtime_t(context, allHandles[i]);
            }
        }
    }

public:
    // Pass through the span iterator so that this class can iterate over it transparently
    using iterator = typename gsl::span<runtime_t>::iterator;
    using reverse_iterator = typename gsl::span<runtime_t>::reverse_iterator;

    /**
     * Default constructor
     */
    BundleContents() = default;

    /**
     * Constructor with direct initialization from context and bundle handle.
     */
    BundleContents(GraphContextObj const& context, bundleHandle_t handle) : BundleContents()
    {
        reset(context, handle);
    }

    /**
     * Constructor with direct initialization from context and bundle path.
     */
    BundleContents(GraphContextObj const& context, omni::fabric::Path path) : BundleContents()
    {
        reset(getInterface(readOnly_t{}, context, path));
    }

    /**
     * Data managed by the bundle cannot be duplicated
     */
    BundleContents(const BundleContents&) = delete;
    BundleContents& operator=(const BundleContents&) = delete;

    /**
     * Clean up any cached data
     */
    ~BundleContents()
    {
        clearAttributeCache();
    }

    /**
     * @deprecated Calling abi_primHandle() is deprecated. Use abi_bundleHandle() instead!
     */
    [[deprecated("Calling abi_primHandle() is deprecated. Use abi_bundleHandle() instead!")]]
    bundleHandle_t const abi_primHandle() const
    {
        return abi_bundleHandle();
    }

    /**
     * @deprecated Calling abi_primHandle() is deprecated. Use abi_bundleHandle() instead!
     */
    [[deprecated("Calling abi_primHandle() is deprecated. Use abi_bundleHandle() instead!")]]
    bundleHandle_t abi_primHandle()
    {
        return abi_bundleHandle();
    }

    /**
     * Return bundle factory interface.
     * Cached in a function-local static for the lifetime of the process.
     */
    static IBundleFactory* getBundleFactoryInterface()
    {
        static omni::core::ObjectPtr<IBundleFactory> factory =
            carb::getCachedInterface<ComputeGraph>()->getBundleFactoryInterfacePtr();
        return factory.get();
    }

    /**
     * @return the raw bundle handle for use via the ABI directly (empty handle when invalid)
     */
    bundleHandle_t const abi_bundleHandle() const
    {
        if (isValid())
            return getBundleHandle(readOnly_t{});
        return {};
    }

    /**
     * @return the raw bundle handle for use via the ABI directly (empty handle when invalid)
     */
    bundleHandle_t abi_bundleHandle()
    {
        if (isValid())
            return getBundleHandle(readOnly_t{});
        return {};
    }

    /**
     * @return the raw bundle interface for use via the ABI directly
     */
    bundleInterface_t* abi_bundleInterface() const
    {
        return m_bundlePtr.get();
    }

    /**
     * @return true if the handle points to valid data within the fabric
     */
    bool isValid() const
    {
        return m_bundlePtr && m_bundlePtr->isValid();
    }

    /**
     * Set the bundle for evaluation. Delayed so that the contents can be created
     * early with just-in-time initialization.
     *
     * @param bundle Evaluation context to use when extracting information
     */
    void reset(omni::core::ObjectParam<bundleInterface_t> bundle)
    {
        m_bundlePtr = omni::core::borrow(bundle.get());
        clearAttributeCache();
    }

    /**
     * Set the context and prim handle for evaluation. Delayed so that the contents can be created
     * early with just-in-time initialization.
     *
     * @param context Evaluation context to use when extracting information
     * @param handle Virtual prim implementing the bundle interface
     */
    void reset(GraphContextObj const& context, bundleHandle_t handle)
    {
        reset(getInterface(readOnly_t{}, context, handle));
    }

    /**
     * @deprecated Calling size() is deprecated. Use attributeCount instead!
     */
    [[deprecated("Calling size() is deprecated. Use attributeCount instead!")]]
    size_t size() const
    {
        return attributeCount();
    }

    /**
     * @return The number of attributes contained within the bundle if valid, 0 otherwise
     */
    size_t attributeCount() const
    {
        return isValid() ? m_bundlePtr->getAttributeCount() : 0;
    }

    /**
     * @return The number of child bundles contained within the bundle if valid, 0 otherwise
     */
    size_t childCount() const
    {
        return isValid() ? m_bundlePtr->getChildBundleCount() : 0;
    }

    /**
     * Extract the interface for an attribute in the bundle with the given name.
     *
     * @param[in] name Token representing the name of the attribute in the bundle
     * @return Bundle member from which attribute information can be extracted (invalid if name was not found)
     */
    runtime_t const attributeByName(NameToken const& name) const
    {
        if ((name == omni::fabric::kUninitializedToken) or ! isValid())
        {
            return runtime_t();
        }
        // NOTE(review): the refreshed cache is not consulted below — the lookup goes straight
        // through the ABI; confirm whether this refresh is required for a side effect.
        updateAttributeCache();
        auto namedAttribute = extractNamedAttribute(readOnly_t(), name);
        return runtime_t(m_bundlePtr->getContext(), namedAttribute);
    }

    /**
     * Iteration interfaces that just pass through responsibility to the underlying span data.
     * Inputs call with const objects, hence the two variations of the functions.
     * Each call refreshes the attribute cache before handing out iterators.
     */
    iterator begin() const
    {
        updateAttributeCache();
        return m_iterableArray.begin();
    }
    iterator end() const
    {
        updateAttributeCache();
        return m_iterableArray.end();
    }
    reverse_iterator rbegin() const
    {
        updateAttributeCache();
        return m_iterableArray.rbegin();
    }
    reverse_iterator rend() const
    {
        updateAttributeCache();
        return m_iterableArray.rend();
    }
    iterator begin()
    {
        updateAttributeCache();
        return m_iterableArray.begin();
    }
    iterator end()
    {
        updateAttributeCache();
        return m_iterableArray.end();
    }
    reverse_iterator rbegin()
    {
        updateAttributeCache();
        return m_iterableArray.rbegin();
    }
    reverse_iterator rend()
    {
        updateAttributeCache();
        return m_iterableArray.rend();
    }

    /**
     * Assignment operator is only active for writable bundle contents (i.e. outputs)
     * Copies the entire input bundle onto the output (clearing current contents first).
     *
     * @param[in] toBeCopied Bundle attribute to be copied
     * @returns Reference to this bundle
     */
    template <eAttributeType AttributeTypeToCopy, eMemoryType MemoryTypeToCopy, PtrToPtrKind GpuPtrTypeToCopy = PtrToPtrKind::eNotApplicable>
    BundleContents<AttributeType, MemoryType, GpuPtrType>& operator=(
        const BundleContents<AttributeTypeToCopy, MemoryTypeToCopy, GpuPtrTypeToCopy>& toBeCopied)
    {
        static_assert(!readOnly, "Assignment is not allowed on input bundles");
        m_bundlePtr->clearContents();
        m_bundlePtr->copyBundle(toBeCopied.abi_bundleHandle());
        reset(m_bundlePtr);
        return *this;
    }

    /**
     * Bundle insertion is only active for writable bundle contents (i.e. outputs)
     * Adds the entire input bundle onto the output.
     *
     * @param[in] toBeInserted Bundle attribute to be inserted
     * @returns Reference to this bundle
     */
    template <eAttributeType AttributeTypeToInsert, eMemoryType MemoryTypeToInsert, PtrToPtrKind GpuPtrTypeToInsert = PtrToPtrKind::eNotApplicable>
    void insertBundle(const BundleContents<AttributeTypeToInsert, MemoryTypeToInsert, GpuPtrTypeToInsert>& toBeInserted)
    {
        static_assert(!readOnly, "Bundle insertion is not allowed on input bundles");
        if (! toBeInserted.isValid())
        {
            CARB_LOG_ERROR("Cannot insert an invalid bundle");
            return;
        }
        if (! isValid())
        {
            CARB_LOG_ERROR("Cannot insert into an invalid bundle");
            return;
        }
        m_bundlePtr->copyBundle(toBeInserted.abi_bundleHandle());
        reset(m_bundlePtr);
    }

    /**
     * Clear the entire bundle contents (outputs only).
     *
     * @returns true when the ABI clear call succeeded
     */
    bool clear()
    {
        static_assert(!readOnly, "Clearing of input bundles is not allowed");
        if (!isValid())
        {
            CARB_LOG_ERROR("Cannot clear an invalid bundle");
            return false;
        }
        clearAttributeCache();
        return OMNI_SUCCEEDED(m_bundlePtr->clearContents(true));
    }

    /**
     * Copy an attribute into the bundle. If no name is passed in then use the attribute's current name.
     *
     * @returns true when the copied attribute handle is valid
     */
    template <typename RuntimeAttributeType>
    bool insertAttribute(RuntimeAttributeType const& attributeToCopy, NameToken newName = omni::fabric::kUninitializedToken)
    {
        static_assert(!readOnly, "Attribute insertion is not allowed on input bundles");
        if (!isValid())
        {
            CARB_LOG_ERROR("Cannot insert into an invalid bundle");
            return false;
        }
        clearAttributeCache();
        AttributeDataHandle attrib = m_bundlePtr->copyAttribute(attributeToCopy.abi_handle(), true, newName);
        return attrib.isValid();
    }

    /**
     * Create a new attribute in the bundle.
     *
     * @param[in] attributeName Name for the new attribute
     * @param[in] attributeType Base type for the attribute
     * @param[in] elementCount If an array type then this is the initial element count
     * @return Runtime attribute wrapper for the newly created attribute
     */
    runtime_t addAttribute(NameToken const& attributeName, Type const& attributeType, size_t elementCount = 0)
    {
        static_assert(!readOnly, "Attribute addition is not allowed on input bundles");
        if (! isValid())
        {
            CARB_LOG_ERROR("Cannot add to an invalid bundle");
            return runtime_t();
        }
        clearAttributeCache();
        auto attribHandle = m_bundlePtr->createAttribute(attributeName, attributeType, elementCount);
        return runtime_t(m_bundlePtr->getContext(), attribHandle);
    }

    /**
     * Add a batch of attributes to a bundle prim.
     *
     * @param[in] attributeCount Number of attributes to be added
     *                           (note: this parameter shadows the attributeCount() member function)
     * @param[in] attributeNames Array of names for the new attributes
     * @param[in] attributeTypes Array of types for the new attributes
     * @return Whether addition was successful
     */
    bool addAttributes(size_t attributeCount, NameToken const* attributeNames, Type const* attributeTypes)
    {
        static_assert(!readOnly, "Attribute addition is not allowed on input bundles");
        if (! isValid())
        {
            CARB_LOG_ERROR("Cannot add attributes to an invalid bundle");
            return false;
        }
        clearAttributeCache();
        size_t createdCount = 0;
        auto result = m_bundlePtr->createAttributes(attributeNames, attributeTypes, attributeCount,
                                                    nullptr /*elementCount*/, nullptr /*createdAttributes*/, &createdCount);
        if (OMNI_FAILED(result))
            return false;
        return attributeCount == createdCount;
    }

    /**
     * Add a batch of child bundles to this bundle.
     *
     * @param childCount Number of children to be added
     * @param childNames Array of names for the new children
     * @param childHandles Output handles of child bundles, 'nullptr' can be passed if no output is required
     * @return Whether addition was successful
     */
    bool addChildBundles(size_t childCount, NameToken const* childNames, BundleHandle* childHandles = nullptr)
    {
        static_assert(!readOnly, "Attribute addition is not allowed on input bundles");
        if (!isValid())
        {
            CARB_LOG_ERROR("Cannot add children to an invalid bundle");
            return false;
        }
        size_t createdCount = 0;
        auto result = m_bundlePtr->createChildBundles(childNames, childCount, childHandles, &createdCount);
        if (OMNI_FAILED(result))
            return false;
        return childCount == createdCount;
    }

    /**
     * Remove an existing attribute from the bundle.
     * Silently succeeds if an attribute with the given name did not exist on the bundle
     *
     * @param[in] attributeName Name of the attribute to remove
     */
    bool removeAttribute(NameToken const& attributeName)
    {
        return removeAttributes(1, &attributeName);
    }

    /**
     * Remove a batch of attributes from a bundle prim.
     *
     * @param[in] attributeCount Number of attributes to be removed
     * @param[in] attributeNames Array of names to be removed
     * @return Whether removal was successful
     */
    bool removeAttributes(size_t attributeCount, NameToken const* attributeNames)
    {
        static_assert(!readOnly, "Attribute removal is not allowed on input bundles");
        if (!isValid())
        {
            CARB_LOG_ERROR("Cannot remove attributes to an invalid bundle");
            return false;
        }
        clearAttributeCache();
        size_t removedCount = 0;
        auto result = m_bundlePtr->removeAttributesByName(attributeNames, attributeCount, &removedCount);
        if (OMNI_FAILED(result))
            return false;
        return removedCount == attributeCount;
    }
};

//! ======================================================================
//! @class BundleChanges
//! @brief This class is designed for inspecting modifications within a bundle during its lifetime.
//!
//! The BundleChanges class enables the inspection of changes in a bundle's attributes and child bundles
//! during the lifetime of the BundleChanges instance. It keeps a record of modifications that have occurred,
//! providing a suite of functionalities to inspect these changes.
//!
//! An integral feature of BundleChanges is its automatic clearing of changes upon destruction,
//! i.e., when the instance goes out of scope. This ties the lifetime of the recorded changes tightly
//! with the BundleChanges instance, ensuring the changes do not persist beyond the intended scope.
template <bool readOnly> class BundleChanges { using BundleHandle_t = std::conditional_t<readOnly, ConstBundleHandle, BundleHandle>; public: BundleChanges(omni::core::ObjectPtr<IBundleChanges> const& changes, BundleHandle_t handle, bool clearAtExit = true) : m_bundleChanges(changes), m_bundleHandle(handle), m_clearAtExit(clearAtExit) { CARB_ASSERT(m_bundleChanges); } BundleChanges(BundleChanges const&) = delete; BundleChanges(BundleChanges&&) = default; BundleChanges& operator=(BundleChanges const&) = delete; BundleChanges& operator=(BundleChanges&&) = default; ~BundleChanges() { if (m_clearAtExit) { clearChanges(); } } //! @brief Activates the change tracking system for a bundle. //! //! This method controls the change tracking system of a bundle. It's only applicable //! for read-write bundles (when readOnly template parameter is false). For read-only //! bundles, this method will cause a compilation error if called. //! //! @throws A static_assert error at compile-time if the method is called on a //! read-only bundle. void activate() noexcept { static_assert(!readOnly, "Can't activate change tracking for read-only bundle."); CARB_ASSERT(m_bundleChanges); m_bundleChanges->activateChangeTracking(m_bundleHandle); } //! @brief Deactivates the change tracking system for a bundle. //! //! This method controls the change tracking system of a bundle. It's only applicable //! for read-write bundles (when readOnly template parameter is false). For read-only //! bundles, this method will cause a compilation error if called. //! //! @throws A static_assert error at compile-time if the method is called on a //! read-only bundle. void deactivate() noexcept { static_assert(!readOnly, "Can't activate change tracking for read-only bundle."); CARB_ASSERT(m_bundleChanges); m_bundleChanges->deactivateChangeTracking(m_bundleHandle); } //! @brief Implicit conversion to bool. //! //! This operator allows an instance of BundleChanges to be automatically converted to a bool. //! 
The boolean value indicates whether the bundle has undergone any changes within its lifetime. //! It leverages the hasChanged() method to provide this information. //! //! @returns True if the bundle has changed; false otherwise. operator bool() noexcept { return hasChanged(); } //! @brief Clears the recorded changes. //! //! This method is used to manually clear the recorded changes of the bundle. omni::core::Result clearChanges() noexcept { CARB_ASSERT(m_bundleChanges); return m_bundleChanges->clearChanges(); } //! @brief Checks if the bundle has changed. //! //! This method is used to check if any changes have been made to the bundle's attributes or child bundles //! within the lifetime of the BundleChanges instance. //! //! @returns True if the bundle has changed; false otherwise. bool hasChanged() noexcept { CARB_ASSERT(m_bundleChanges); return m_bundleChanges->getChange(m_bundleHandle) != BundleChangeType::None; } //! @brief Retrieves the change status of a specific attribute. //! //! This method is used to check if a specific attribute of the bundle has been modified //! within the lifetime of the BundleChanges instance. //! //! @param attribute The specific attribute of the bundle to check for modifications. //! //! @returns True if the specified attribute has changed; false otherwise. template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType> BundleChangeType getChange(RuntimeAttribute<AttributeType, MemoryType, GpuPtrType> const& attribute) noexcept { CARB_ASSERT(m_bundleChanges); auto const handle = attribute.abi_handle(); return m_bundleChanges->getChange(handle); } //! @brief Retrieves the change status of a specific bundle. //! //! This method is used to check if a specific bundle or its contents have been modified //! within the lifetime of the BundleChanges instance. //! //! @param bundle The specific bundle to check for modifications. //! //! 
@returns A BundleChangeType value indicating the type of change (if any) that has occurred to the specified //! bundle. template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType> BundleChangeType getChange(BundleContents<AttributeType, MemoryType, GpuPtrType> const& bundle) noexcept { CARB_ASSERT(m_bundleChanges); auto const handle = bundle.abi_bundleHandle(); return m_bundleChanges->getChange(handle); } //! @brief Retrieves the change status of a specific bundle or attribute using its handle. //! //! This function is used to check if a specific bundle or attribute, identified by its handle, has been modified //! within the lifetime of the BundleChanges instance. //! //! @tparam HANDLE_TYPE The type of the handle (ConstBundleHandle or ConstAttributeDataHandle). //! @param handle The handle to the specific bundle or attribute to check for modifications. //! //! @returns A BundleChangeType value indicating the type of change (if any) that has occurred to the bundle or //! attribute associated with the specified handle. template <typename HANDLE_TYPE> BundleChangeType abi_getChange(HANDLE_TYPE const& handle) noexcept { constexpr auto isBundle = std::is_same<HANDLE_TYPE, ConstBundleHandle>::value || std::is_same<HANDLE_TYPE, BundleHandle>::value; constexpr auto isAttrib = std::is_same<HANDLE_TYPE, ConstAttributeDataHandle>::value || std::is_same<HANDLE_TYPE, AttributeDataHandle>::value; static_assert(isBundle || isAttrib, "Unsupported handle type for abi_getChange!"); CARB_ASSERT(m_bundleChanges); return m_bundleChanges->getChange(handle); } private: omni::core::ObjectPtr<IBundleChanges> m_bundleChanges; BundleHandle_t m_bundleHandle; bool m_clearAtExit; }; // ====================================================================== /** * Template class responsible for managing the interaction with bundle type input attributes. * It wraps the bundle in an interface with a more natural interaction than the raw ABI calls. 
 *
 * <AttributeType> How the attribute is interpreted - input, output, or state value
 * <MemoryType> where the memory for the attributes in this bundle will live (CPU, GPU, or decided at runtime)
 */
template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct BundleAttribute
{
    // friend with other templates so differently-parameterized BundleAttributes can reach
    // each other's private bundleHandle() during cross-type assignment
    template <eAttributeType, eMemoryType, PtrToPtrKind>
    friend struct BundleAttribute;

private:
    //! Aliases used during overload resolution to differentiate between read-only and read-write.
    using roTag_t = std::true_type;
    using rwTag_t = std::false_type;

    // The writability of a bundle will determine what kinds of operations can be performed on it
    static constexpr bool readOnly = (AttributeType == kOgnInput);
    using readOnly_t = std::conditional_t<AttributeType == kOgnInput, roTag_t, rwTag_t>;
    using bundleHandle_t = std::conditional_t<readOnly, ConstBundleHandle, BundleHandle>;

    /**
     * Construct bundle interface based on provided context and bundle handle.
     * Tag-dispatched: this overload (rwTag_t) returns the writable IBundle interface.
     *
     * @param context Evaluation context.
     * @param handle Bundle handle.
     */
    auto getInterface(rwTag_t, GraphContextObj const& context, bundleHandle_t handle) const
    {
        auto iComputeGraph = carb::getCachedInterface<ComputeGraph>();
        auto factory = iComputeGraph->getBundleFactoryInterfacePtr();
        return factory->getBundle(context, handle);
    }
    /**
     * Tag-dispatched overload (roTag_t) returning the const bundle interface for inputs.
     *
     * @param context Evaluation context.
     * @param handle Bundle handle.
     */
    auto getInterface(roTag_t, GraphContextObj const& context, bundleHandle_t handle) const
    {
        auto iComputeGraph = carb::getCachedInterface<ComputeGraph>();
        auto factory = iComputeGraph->getBundleFactoryInterfacePtr();
        return factory->getConstBundle(context, handle);
    }

    /**
     * Bundle attributes always live on the CPU since they are always small, containing only a single value through
     * which their contents can be referenced. The memory type is passed down though, to provide the appropriate
     * interfaces to the attributes within the bundle.
     *
     * @param[in] index In vectorized context, the instance index to access
     * @return The corresponding bundle handle, or an invalid handle when the fabric data
     *         has not been wired up via setHandle() yet
     */
    bundleHandle_t bundleHandle(size_t index = 0) const
    {
        // Double indirection: fabric stores, per instance, a pointer to the handle(s) of
        // the bundle(s) targeted by the relationship.
        bundleHandle_t* rel = m_bundleHandleArrayPtr ? m_bundleHandleArrayPtr[m_offset + index] : nullptr;
        //TODO: multiple input rel - only the first targeted bundle is used here
        return rel ? rel[0] : bundleHandle_t{ bundleHandle_t::invalidValue() };
    }

    // --------------------------------------------------------------------------------------------------------------
    //! Data members
    //! Pointer to the vectorized set of data
    bundleHandle_t** m_bundleHandleArrayPtr{ nullptr };
    //! ABI OmniGraph context object
    const GraphContextObj* m_context{ nullptr };
    //! Interface to the bundle data, constructed on demand (shared by every call to operator())
    BundleContents<AttributeType, MemoryType, GpuPtrType> m_bundle;
    //! In vectorized context, offset at which we should read our handle.
    //! NOTE(review): this is a reference member, so the referenced offset is owned by the
    //! generated database and this struct is neither copy- nor move-assignable.
    size_t const& m_offset;
    //! Top level bundle change tracking
    omni::core::ObjectPtr<IBundleChanges> m_bundleChanges;

public:
    // --------------------------------------------------------------------------------------------------------------
    /**
     * Set up the accessor for output attributes with Bundle data
     *
     * @param[in] offset Reference to the externally-owned vectorization offset; must outlive this object
     */
    BundleAttribute(size_t const& offset) : m_offset(offset)
    {
    }

    /**
     * Bundle attributes always live on the CPU since they are always small, containing only a single value through
     * which their contents can be referenced. The memory type is passed down though, to provide the appropriate
     * interfaces to the attributes within the bundle.
     *
     * @param[in] index In vectorized context, the instance index to access
     * @return Reference to the raw fabric data.
     */
    BundleContents<AttributeType, MemoryType, GpuPtrType>& operator()(size_t index = 0)
    {
        CARB_ASSERT(context());
        // The single m_bundle member is re-targeted on every call, so a reference obtained
        // from a previous call with a different index is redirected as well.
        m_bundle.reset(*context(), bundleHandle(index));
        return m_bundle;
    }

    /**
     * Set the evaluation context for the attribute to allow later access. The data isn't available at construction
     * time so this method is provided to add it in when it becomes available.
     *
     * @param[in] contextObj OmniGraph context object to which this attribute belongs
     */
    void setContext(const GraphContextObj& contextObj)
    {
        m_context = &contextObj;
        // Change tracking is context-specific; drop the cached interface so it is re-fetched lazily
        m_bundleChanges.release();
    }

    /**
     * Set the attribute handle for input bundles
     *
     * Precondition: setContext() must have been called first - m_context is dereferenced here.
     *
     * @param[in] handle Handle to the attribute to which the bundle belongs
     */
    void setHandle(ConstAttributeDataHandle handle)
    {
        m_context->iAttributeData->getDataR((const void**)&m_bundleHandleArrayPtr, *m_context, &handle, 1);
        m_bundleChanges.release();
    }

    /**
     * Retrieve the context object
     *
     * @return The context set via setContext(), or nullptr if none has been set yet
     */
    GraphContextObj const* context() const
    {
        return m_context;
    }

    /**
     * @param[in] index In vectorized context, the instance index to access
     * @return The path to the bundle data, or nullptr if the bundle interface is unavailable
     */
    char const* path(size_t index = 0) const
    {
        auto bundlePtr = getInterface(readOnly_t{}, *context(), bundleHandle(index));
        if (!bundlePtr)
        {
            return nullptr;
        }
        omni::fabric::PathC path = bundlePtr->getPath();
        return carb::getCachedInterface<omni::fabric::IPath>()->getText(path);
    }

    /**
     * @param[in] index In vectorized context, the instance index to access
     * @return True if the underlying attribute data is valid for accessing
     */
    bool isValid(size_t index = 0) const
    {
        return bundleHandle(index).isValid();
    }

    /**
     * Assignment operator is only active for writable bundle contents (i.e. outputs).
     * At this level if the data is being stolen it redirects the output to point to the input, otherwise it
     * copies the entire bundle.
     *
     * @note Any accessors to this bundle (operator()) must be called after the assignment or they will be invalid
     *
     * @param[in] toBeCopied Bundle attribute from which this one will be redirected or copied
     * @returns Reference to this bundle
     */
    template <eAttributeType AttributeTypeToCopy, eMemoryType MemoryTypeToCopy, PtrToPtrKind GpuPtrTypeToCopy = PtrToPtrKind::eNotApplicable>
    BundleAttribute<AttributeType, MemoryType, GpuPtrType>& operator=(const BundleAttribute<AttributeTypeToCopy, MemoryTypeToCopy, GpuPtrTypeToCopy>& toBeCopied)
    {
        static_assert(!readOnly, "Assignment is not allowed on input bundles");
        if (!context() || !toBeCopied.isValid())
        {
            CARB_LOG_ERROR_ONCE("Could not assign to or from invalid bundle attribute");
            return *this;
        }
        auto bundlePtr = getInterface(readOnly_t{}, *context(), bundleHandle());
        if (bundlePtr)
        {
            // Replace the destination contents wholesale with a copy of the source bundle
            bundlePtr->clearContents(true);
            bundlePtr->copyBundle(toBeCopied.bundleHandle());
        }
        return *this;
    }

    //! @brief Retrieves the `BundleChanges` object.
    //!
    //! The `changes` function returns the `BundleChanges` object associated with requested instance.
    //! It allows access to the bundle change tracking. The underlying IBundleChanges interface is
    //! fetched lazily on first use and cached until the context or handle changes.
    //!
    //! @return Returns the `BundleChanges` object associated with requested instance.
    BundleChanges<readOnly> changes(size_t instanceIndex = 0, bool clearAtExit = true) noexcept
    {
        CARB_ASSERT(context());
        if (!m_bundleChanges)
        {
            m_bundleChanges = carb::getCachedInterface<ComputeGraph>()->getBundleChangesInterfacePtr(*context());
        }
        return { m_bundleChanges, bundleHandle(instanceIndex), clearAtExit };
    }
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/SimpleRuntimeAttribute.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/ogn/RuntimeAttribute.h>
#include <omni/graph/core/ogn/SimpleAttribute.h>

namespace omni {
namespace graph {
namespace core {
namespace ogn {

//! Database wrapper around a single runtime (dynamically typed) attribute.
//!
//! Lazily (re)binds the wrapped RuntimeAttribute to the correct fabric data handle:
//! type-resolution changes only mark the wrapper dirty via fetchDetails(), and the
//! actual handle fetch is deferred to the next call of operator().
template <typename RTAttrib>
struct SimpleRTAttrib
{
    using dataHandle_t = typename RTAttrib::dataHandle_t;
    using attrib_t = typename std::remove_const<RTAttrib>::type;
    using this_t = SimpleRTAttrib<RTAttrib>;

private:
    //! In vectorized context, externally-owned offset of the instance this wrapper starts from
    size_t const& m_offset;
    //! The wrapped runtime attribute (non-const even when RTAttrib is const, so it can be rebound)
    attrib_t m_attrib;
    //! Absolute instance offset m_attrib currently points at
    size_t m_rootHandleOffset{0};
    //! True when a type-resolution change has been recorded and the handle must be re-fetched
    bool m_dirty{ false };
    //! Attribute recorded by fetchDetails(), consumed on the next access
    AttributeObj m_dirtyPendingObj;

public:
    SimpleRTAttrib(size_t const& offset) : m_offset(offset) {}
    SimpleRTAttrib(SimpleRTAttrib const& other) = default;
    SimpleRTAttrib(SimpleRTAttrib&&) = default;
    SimpleRTAttrib& operator=(SimpleRTAttrib const& other)
    {
        // NOTE(review): references cannot be reseated, so this assigns other's offset VALUE
        // through the const reference, mutating the caller-owned size_t that m_offset is
        // bound to - confirm this write-through is intended.
        const_cast<size_t&>(m_offset) = other.m_offset;
        m_attrib = other.m_attrib;
        m_rootHandleOffset = other.m_rootHandleOffset;
        m_dirty = other.m_dirty;
        m_dirtyPendingObj = other.m_dirtyPendingObj;
        return *this;
    }
    SimpleRTAttrib& operator=(SimpleRTAttrib&&) = default;

    /**
     * @param[in] index In vectorized context, the instance index to access
     * @return Reference to the inner runtime attribute
     */
    RTAttrib& operator()(size_t index = 0)
    {
        static const dataHandle_t kInvalidHandle = dataHandle_t{ dataHandle_t::invalidValue() };
        if (m_dirty)
        {
            // A type-resolution change was deferred by fetchDetails(); fetch the new handle now
            auto hdl = m_dirtyPendingObj.iAttribute->getAttributeDataHandle(m_dirtyPendingObj, { m_rootHandleOffset });
            m_attrib.reset(m_attrib.abi_context(), hdl, m_dirtyPendingObj);
            m_dirtyPendingObj = { nullptr, kInvalidAttributeHandle };
            m_dirty = false;
        }
        //if our attrib already points to something...
        if (m_attrib.abi_handle() != kInvalidHandle)
        {
            //check that this something is actually the asked instance
            auto offset = index + m_offset;
            if (offset != m_rootHandleOffset)
            {
                //if not, make it point to the proper instance
                m_attrib.adjustHandle(offset - m_rootHandleOffset);
                //... and keep trace of which instance is currently pointed
                m_rootHandleOffset = offset;
            }
        }
        return m_attrib;
    }

    /**
     * Const access; delegates to the non-const overload, which may rebind the cached handle.
     *
     * @param[in] index In vectorized context, the instance index to access
     * @return Const reference to the inner runtime attribute
     */
    RTAttrib const& operator()(size_t index = 0) const
    {
        return const_cast<this_t*>(this)->operator()(index);
    }

    /**
     * @return True if the attribute can be accessed for vectorized compute
     */
    // (the const qualifier on the bool return type has no effect)
    bool const canVectorize() const
    {
        return m_attrib.canVectorize();
    }

    //! Record a type-resolution change without touching fabric yet.
    void const fetchDetails(const AttributeObj& attr)
    {
        //This is called on type resolution changes, lots of other changes are probably going to happen on the graph
        // So do not fetch anything right away at this would force a useless graph bucket creation,
        // and just defer the fetch on first access
        m_dirty = true;
        m_dirtyPendingObj = attr;
    }
};

//! Specialization wiring runtime-typed INPUT attributes into the SimpleInput machinery.
template <eAttributeType AttributeType, eMemoryType MemoryType, eMemoryType DUMMY, PtrToPtrKind GpuPtrType>
struct SimpleInput<const RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>, DUMMY>
    : public SimpleRTAttrib< const RuntimeAttribute<AttributeType, MemoryType, GpuPtrType> >
{
    using super = SimpleRTAttrib< const RuntimeAttribute<AttributeType, MemoryType, GpuPtrType> >;
    SimpleInput(size_t const& offset) : super(offset) {}
};

//! Specialization wiring runtime-typed WRITABLE (output/state) attributes into SimpleWritableAttribute.
template <eAttributeType AttributeType, eMemoryType MemoryType, eMemoryType DUMMY, PtrToPtrKind GpuPtrType>
struct SimpleWritableAttribute<RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>, AttributeType, DUMMY>
    : public SimpleRTAttrib< RuntimeAttribute<AttributeType, MemoryType, GpuPtrType> >
{
    using super = SimpleRTAttrib< RuntimeAttribute<AttributeType, MemoryType, GpuPtrType> >;
    SimpleWritableAttribute(size_t const& offset) : super(offset) {}
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/State.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/iComputeGraph.h>

#include <tbb/concurrent_hash_map.h>

#include <memory>

// =================================================================================================================
// This file contains simple interface classes which wrap data in the OGN database for easier use.
//
//     StateManager        Class that automatically manages per-node state data
//
// WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code.
//          If you call them directly you may have to modify your code when they change.
// =================================================================================================================

namespace omni {
namespace graph {
namespace core {
namespace ogn {

// ======================================================================
/**
 * This class structure provides support for internal state data. The reason for such a tricky implementation is to
 * make it easier to access from the node implementation code. At compute time only the database is accessible, so
 * the state information is best retrieved from there. At the same time the minimal amount of effort required to set
 * up state information is to put it in the node implementation class, so the database needs to know how the class is
 * structured and the class needs to know how the database is structured, leading to a recursive inclusion, not handled
 * well by C++.
 *
 * Instead of this a templated state manager is instantiated by the .ogn generation code that lives as a static member
 * of the generated database class. The template parameters hide the fact that the state exists in the not-yet-defined
 * node implementation class, at the cost of requiring the class be mentioned when retrieving the state information.
 *
 * Inside the node's compute method the state information is retrieved as:
 *     auto& state = db.internalState<OgnMyNode>();
 */
struct StateManager
{
    // As the map from NodeHandle to the state information cannot know the template type (not yet defined as per the
    // above explanation) this base class is set up as a basic form of type hiding. This way the map can hold
    // references to the heterogeneous set of templated state information. This is safe because every NodeHandle will
    // uniquely reference a different type of state class, and the virtual base class hides that fact from the
    // non-templated part of this manager.
    struct StateHolderBase
    {
        // Virtual destructor so erasing a type-hidden holder destroys the derived state object
        virtual ~StateHolderBase() {}
    };

    // The templated holder class provides access to the underlying state information.
    template <typename StateInformationClass>
    struct StateHolder : StateHolderBase
    {
        std::unique_ptr<StateInformationClass> m_stateObject;
        // Default-constructs the node's state object on first access
        StateHolder() : m_stateObject{ std::make_unique<StateInformationClass>() } {}
        StateInformationClass& value() { return *m_stateObject; }
    };

    // Pair of the node and the graph instance ID (i.e. represents a unique node in a unique graph instance)
    using Key = std::pair<NodeHandle, size_t>;
    using ConcurrentHashMap = tbb::concurrent_hash_map<Key, std::unique_ptr<StateHolderBase>>;
    using WriteAccessor = typename ConcurrentHashMap::accessor;

    // This method provides access to the state information associated with a unique node instance,
    // creating it on first use. Safe to call concurrently: the TBB accessor locks the element.
    template <typename StateInformationClass>
    StateInformationClass& getState(NodeHandle value, InstanceIndex idx = kAccordingToContextIndex)
    {
        // It would have been better to avoid having a state manager at all when no state exists but a simple method
        // of doing so was not found. This approach looked promising but suffered from the same circular dependency
        // that mandated the current approach: https://stackoverflow.com/questions/25492589/
        // Here we at least let the node writer know that they need to add state for this call to make sense.
        static_assert(!std::is_empty<StateInformationClass>::value, "State class is empty");
        Key k{ value, carb::getCachedInterface<INode>()->getGraphInstanceID(value, idx).token};
        WriteAccessor writeIt;
        if(!m_managedState.find(writeIt, k))
        {
            // If another thread inserted between find and emplace, emplace still points the
            // accessor at the existing element - presumably why its return value is ignored here.
            m_managedState.emplace(writeIt, k, std::make_unique<StateHolder<StateInformationClass>>());
        }
        // This is the magic that lets a generic map correctly handle members of templated types. We've
        // constructed the class to guarantee that a reinterpret_cast will work, avoiding the more expensive
        // dynamic_cast, and since this method is templated the exact type for casting is known.
        return (reinterpret_cast<StateHolder<StateInformationClass>*>(writeIt->second.get()))->value();
    }

    // As the lifespan of node handles is limited they must remove themselves from the state map when they are
    // released. Note that this is required due to the fact that the node implementation classes are not
    // themselves instantiated, except for this state management. The NodeHandle refers to a unique instantiation
    // of a node of a particular type in a unique OmniGraph evaluation context.
    //
    // This method does not need to be templated because the underlying StateHolder knows how to destroy its type.
    void removeState(NodeHandle value, NameToken instanceID)
    {
        Key k{ value, instanceID.token };
        m_managedState.erase(k);
    }

    // Quick lookup of state information associated with a node, using the base class type to mask the templated
    // type that lies beneath. Using a unique_ptr ensures it is properly destroyed when the node is released.
    ConcurrentHashMap m_managedState;
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/string.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // ogn::const_string Read-only access to a string in fabric // ogn::string Writable access to a string in fabric, with resizing and reallocation wrapped in // string-like functions // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= #include <gsl/string_span> #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/ogn/Types.h> #include <algorithm> #include <string> #include <stdexcept> namespace omni { namespace graph { namespace core { namespace ogn { /** * std::string_view-like wrapper class for string attribute data in the Ogn Database. * As of this writing OGN is built on C++14 and std::string_view requires C++17 so the Microsoft forward * compatible implementaton of gsl::string_span<> is used. * * * This wrapper operates by using the Fabric interface to interact with string data it has stored. * The base string class provides common operations common to both const and non-const data. 
*/ template <typename CharType, typename HandleType> class base_string : public base_array<CharType, HandleType> { public: /** * @return The number of elements in the currently managed array */ size_t length() const { return this->size(); } /** * Cast the string data to a std::string. * * After you do this the string data is now a copy of the original so you can manipulate it without * affecting the original, however it means you must reassign it back to a writable ogn::string if you * do want to change the string. * * @return String containing a copy of the internal raw string data */ operator std::string() const { auto const& spanObj = this->span(); if (spanObj.data()) { return std::string{ spanObj.data(), spanObj.size() }; } return std::string{}; } /** * See if two strings are equal. * * These simple comparison operators avoid extra conversions to std::string, to help minimize allocations * * @tparam IterableStringType String variant that supports iteration for comparison * @param[in] rhs String to compare against - just requires iterability * @return True if the strings have the same contents */ template <typename IterableStringType> bool operator==(const IterableStringType& rhs) const { return this->data() ? std::equal(this->begin(), this->end(), rhs.begin(), rhs.end()) : (rhs.begin() == rhs.end()); } /** * See if two strings are not equal. * * These simple comparison operators avoid extra conversions to std::string, to help minimize allocations * * @tparam IterableStringType String variant that supports iteration for comparison * @param[in] rhs String to compare against - just requires iterability * @return True if the strings do not have the same contents */ template <typename IterableStringType> bool operator!=(const IterableStringType& rhs) const { return this->data() ? !std::equal(this->begin(), this->end(), rhs.begin(), rhs.end()) : (rhs.begin() != rhs.end()); } /** * See if this string is lexicographically less than the provided one. 
* * These simple comparison operators avoid extra conversions to std::string, to help minimize allocations * * @tparam IterableStringType String variant that supports iteration for comparison * @param[in] rhs String to compare against - just requires iterability * @return True if this string is lexicographically less than the provided one */ template <typename IterableStringType> bool operator<(const IterableStringType& rhs) const { return std::lexicographical_compare(this->begin(), this->end(), rhs.begin(), rhs.end()); } /** * See if this string is lexicographically greater than the provided one. * * These simple comparison operators avoid extra conversions to std::string, to help minimize allocations * * @tparam IterableStringType String variant that supports iteration for comparison * @param[in] rhs String to compare against - just requires iterability * @return True if this string is lexicographically greater than the provided one */ template <typename IterableStringType> bool operator>(const IterableStringType& rhs) const { return std::lexicographical_compare(rhs.begin(), rhs.end(), this->begin(), this->end()); } /** * See if this string is lexicographically less than or equal to the provided one. * * These simple comparison operators avoid extra conversions to std::string, to help minimize allocations * * @tparam IterableStringType String variant that supports iteration for comparison * @param[in] rhs String to compare against - just requires iterability * @return True if this string is lexicographically less than or equal to the provided one */ template <typename IterableStringType> bool operator<=(const IterableStringType& rhs) const { return operator<(rhs) || operator==(rhs); } /** * See if this string is lexicographically greater than or equal to the provided one. 
* * These simple comparison operators avoid extra conversions to std::string, to help minimize allocations * * @tparam IterableStringType String variant that supports iteration for comparison * @param[in] rhs String to compare against - just requires iterability * @return True if this string is lexicographically greater than or equal to the provided one */ template <typename IterableStringType> bool operator>=(const IterableStringType& rhs) const { return operator>(rhs) || operator==(rhs); } }; /** * std::string_view-like wrapper class for constant string attribute data in the Ogn Database. * It operates by using the Fabric interface to interact with string data it has stored. * This const version of the string wrapper should be used for input attributes, whose data cannot be changed. * */ class const_string : public base_string<const char, ConstAttributeDataHandle> { // from regular attributes template <typename, eAttributeType, eMemoryType, PtrToPtrKind> friend struct ArrayAttribute; // from runtime attributes template <typename, bool, eMemoryType, PtrToPtrKind> friend struct ArrayData; public: /** * Default constructor to start pointing to nothing. */ const_string() = default; }; /** * std::string_view-like class for string output attribute data in the Ogn Database. * It operates by using the Fabric interface to interact with string data it has stored. * This non-const version of the string wrapper should be used for output attributes, whose data will be changed. * */ class string : public base_string<char, AttributeDataHandle> { public: /** Type of the parent class */ using parent_t = base_string<char, AttributeDataHandle>; // Make non-templated functions available to pass 1 of template resolution // http://www.gotw.ca/gotw/087.htm using base_string::data; using base_string::size; using base_string::reset; /** * Default constructor to start pointing to nothing. */ string() = default; /** * Set the size of the array data to a new value. 
* * This may or may not relocate memory. If anything is holding a raw pointer from contents() * that pointer should be refreshed by calling contents() again when this method returns. * * @param[in] newCount New element count of the array. */ void resize(size_t newCount) { CUDA_SAFE_ASSERT(isValid()); const IAttributeData& iData = *(this->context()->iAttributeData); iData.setElementCount(*this->context(), this->handle(), newCount); this->setDirty(); } /** * @return Non-const pointer to the raw string data */ char* data() { return const_cast<char*>(this->span().data()); } /** * Access a specific character in the string. No bounds checking is performed. * * @param[in] index Element index into the string * @return Reference to the index'th character of the string */ char& operator[](size_t index) { return const_cast<char&>(this->operator[](index)); } /** * Access a specific character in the string. Bounds checking is performed. * * @param[in] index Element index into the string * @return Reference to the index'th character of the string */ char& at(size_t index) { return const_cast<char&>(this->at(index)); } /** * Assignment of a raw sized string to this object, used by all variations. * * This will create a duplicate of all string members and reset the references into the Fabric, leaving the * context and the handle unchanged. * * @param[in] dataToCopy The string being copied in * @param[in] charCount The number of characters in the string being copied * @return Pointer to this */ string& assign(char const* dataToCopy, size_t charCount) { CUDA_SAFE_ASSERT(isValid()); // Resize this string first so that it has the proper space to receive the new data resize(charCount); if (charCount == 0) { // No work to do when the new string is empty return *this; } std::memcpy(data(), dataToCopy, charCount); return *this; } /** * Generic string assignment operator * * This template handles all compatible string types with a data() and size() method. 
At minimum * ogn::string, ogn::const_string, std::string, const std::string * * @param[in] rhs The string being copied in * @return Pointer to this */ template <typename StringType> string& operator=(StringType& rhs) { return assign(rhs.data(), rhs.size()); } /** * const char* assignment operator (avoids alloc, and disambiguates between std::string and ogn::const_string) * * This will create a duplicate of all string members and reset the references into the Fabric, leaving the * context and the handle unchanged. * * @param[in] rhs The string being copied in * @return Pointer to this */ string& operator=(const char* rhs) { return assign(rhs, rhs ? std::strlen(rhs) : 0); } /** * Standard method to clear a string, i.e. assign the empty string to it. * Since the null terminator is not needed in fabric the string truly is empty */ void clear() { resize(0); } }; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/AttributeInitializer.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/ogn/Types.h> // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // AttributeInitializer Generated code uses this to store unchanging attribute information // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= namespace omni { namespace graph { namespace core { namespace ogn { // Determine if the underlying data type can be copy-constructed or if it has to use a raw memcpy template<class DataType> constexpr bool needs_memcpy_v = ! std::is_copy_constructible<DataType>::value; // Copy an array into the default value, where the array elements are copy-constructible template <typename FromDataType, typename ToDataType, std::enable_if_t<! 
needs_memcpy_v<std::remove_pointer_t<ToDataType>>, int> = 0> void copyArrayValue(FromDataType from, ToDataType& to, size_t elementCount) { if (elementCount == 0) { to = nullptr; return; } to = new std::remove_pointer_t<ToDataType>[elementCount]; for (size_t i=0; i<elementCount; ++i) { to[i] = from[i]; } } // Copy an array into the default value, where the array elements are not copy-constructible and have to be memcpy'd template <typename FromDataType, typename ToDataType, std::enable_if_t<needs_memcpy_v<std::remove_pointer_t<ToDataType>>, int> = 0> void copyArrayValue(FromDataType from, ToDataType& to, size_t elementCount) { if (elementCount == 0) { to = nullptr; return; } to = new std::remove_pointer_t<ToDataType>[elementCount]; memcpy(to, from, elementCount * sizeof(std::remove_pointer_t<ToDataType>)); } template <typename DataType, std::enable_if_t<std::is_pointer<DataType>::value, int> = 0> void destroyArrayValue(DataType& defaultValuePointer) { delete [] defaultValuePointer; } template <typename DataType, std::enable_if_t<! std::is_pointer<DataType>::value, int> = 0> void destroyArrayValue(DataType&) { } // ====================================================================== /** * Templated class containing the minimal amount of information required to * manage the information on attributes that is independent of evaluation context. * Some of the structures are a little non-standard, intentionally so to make * access to the attribute and node type ABI easier. * * The DataType of the attribute is solely used for storing its default value. For single values the default member is * an instance of the attribute data type. For arrays it is a pointer to the type, used in conjunction with an element * count to determine the size of the array. * * None of this interface should be called directly; only from generated code. 
 * The "attributeType" template parameter is used to selectively enable or disable features
 * that are specific to input, output, or state types (e.g. the INodeType method addInput/addOutput/addState)
 */
template <typename DataType, ogn::eAttributeType attributeType = ogn::kOgnInput>
struct AttributeInitializer
{
    // This is needed due to the way const types are specified. add_const(int*) -> "int* const", not "int const*"
    // Type used to accept a default value from the caller: "element const*" for arrays, "DataType const&" otherwise
    using DefaultParameterType = std::conditional_t<
        std::is_pointer<DataType>::value,
        std::add_pointer_t<std::add_const_t<std::remove_pointer_t<DataType>>>,
        std::add_const_t<DataType>&
    >;
    // Type used to store the default value: a mutable pointer for arrays, a mutable value otherwise
    using DefaultMemberType = std::conditional_t<
        std::is_pointer<DataType>::value,
        std::add_pointer_t<std::remove_const_t<std::remove_pointer_t<DataType>>>,
        std::remove_const_t<DataType>
    >;

    // TODO: Much of this information is already stored internally; there should be a way to avoid this duplication
    const char* m_name{ nullptr }; //!< Unique name of this attribute
    const char* m_dataType{ nullptr }; //!< Identifying name for the data type of the attribute
    NameToken m_token; //!< Unique token for this attribute's name
    DefaultMemberType m_defaultValue; //!< Default value for this attribute
    size_t m_elementCount{ 0 }; //!< Element count for the attribute values type
    ExtendedAttributeType m_extendedType{kExtendedAttributeType_Regular}; //!< Extended type of this attribute
    bool m_isRequired{ true }; //!< Is the attribute required?
    bool m_hasDefaultValue{ false }; //!< Was a default value explicitly set?
    bool m_hasElementCount{ false }; //!< Was an element count value explicitly set?

    /**
     * This interface is helpful in nodes, to make it easier to find attributes by name.
     * Internal storage may change but this method will continue to exist.
     * @return Pointer to raw string with attribute name in it
     */
    const char* name() const { return m_name; }

    /**
     * This interface is helpful in nodes, to make it easier to find attributes by name
     * Internal storage may change but this method will continue to exist.
     * @return Token with the attribute name in it
     */
    NameToken const& token() const { return m_token; }

    /**
     * Destroy the allocated default value
     */
    ~AttributeInitializer()
    {
        // Only array defaults allocate memory (via copyArrayValue); single values are stored inline
        if (m_hasElementCount)
        {
            destroyArrayValue<DefaultMemberType>(m_defaultValue);
        }
    }

    /**
     * Minimal constructor for attributes with no defaults
     *
     * @param[in] attributeName Name of the attribute to be accessed
     * @param[in] dataTypeName Name of the attribute's data type
     * @param[in] extendedType Extended type of the attribute
     */
    AttributeInitializer(const char* attributeName, const char* dataTypeName, ExtendedAttributeType extendedType)
        : m_name(attributeName)
        , m_dataType(dataTypeName)
        , m_extendedType(extendedType)
    {
    }

    /**
     * Constructor for attributes with a default value but no element counts (i.e. not arrays) where the type supports
     * a copy constructor. There is a specialization of types for strings as "const char*" is not equivalent to
     * "char const*", which is required to initialize strings from a raw string.
     *
     * @param[in] attributeName Name of the attribute to be accessed
     * @param[in] dataTypeName Name of the attribute's data type
     * @param[in] extendedType Extended type of the attribute
     * @param[in] newDefault Reference to the default value of the attribute's data type
     */
    template <typename ConstructedDataType = DataType,
              typename std::enable_if_t<! needs_memcpy_v<ConstructedDataType>, int> = 0>
    AttributeInitializer(const char* attributeName,
                         const char* dataTypeName,
                         ExtendedAttributeType extendedType,
                         DefaultParameterType newDefault)
        : m_name(attributeName)
        , m_dataType(dataTypeName)
        , m_defaultValue(newDefault)
        , m_extendedType(extendedType)
        , m_hasDefaultValue(true)
    {
    }

    /**
     * Constructor for attributes with a default value but no element counts (i.e. not arrays) where the type does not
     * support a copy constructor. There is a specialization of types for strings as "const char*" is not equivalent to
     * "char const*", which is required to initialize strings from a raw string.
     *
     * @param[in] attributeName Name of the attribute to be accessed
     * @param[in] dataTypeName Name of the attribute's data type
     * @param[in] extendedType Extended type of the attribute
     * @param[in] newDefault Reference to the default value of the attribute's data type
     */
    template <typename ConstructedDataType = DataType,
              typename std::enable_if_t<needs_memcpy_v<ConstructedDataType>, int> = 0>
    AttributeInitializer(const char* attributeName,
                         const char* dataTypeName,
                         ExtendedAttributeType extendedType,
                         DefaultParameterType newDefault)
        : m_name(attributeName)
        , m_dataType(dataTypeName)
        , m_extendedType(extendedType)
        , m_hasDefaultValue(true)
    {
        // As this constructor is only called when there is no copy constructor it's reasonable to use a plain
        // memcpy to get the bytes from one object to another. They are required to be bytewise compatible.
        memcpy(&m_defaultValue, &newDefault, sizeof(ConstructedDataType));
    }

    /**
     * Constructor for array attributes with a default value where the type supports a copy constructor
     *
     * @param[in] attributeName Name of the attribute to be accessed
     * @param[in] dataTypeName Name of the attribute's data type
     * @param[in] extendedType Extended type of the attribute
     * @param[in] newDefault Reference to the default value of the attribute's data type
     * @param[in] newElementCount Number of elements in the default value
     */
    AttributeInitializer(const char* attributeName,
                         const char* dataTypeName,
                         ExtendedAttributeType extendedType,
                         DefaultParameterType newDefault,
                         size_t newElementCount)
        : m_name(attributeName)
        , m_dataType(dataTypeName)
        , m_defaultValue(nullptr)
        , m_elementCount(newElementCount)
        , m_extendedType(extendedType)
        , m_hasDefaultValue(true)
        , m_hasElementCount(true)
    {
        // Makes a deep copy of the array default; released in the destructor
        copyArrayValue<DefaultParameterType, DefaultMemberType>(newDefault, m_defaultValue, newElementCount);
    }

    /**
     * Set a default value after the fact; for types that could have problems with static initialization order
     * if they tried to do this in the constructor (e.g.
NameToken) * * @param[in] newDefault Reference to the default value of the attribute's data type */ void setDefault(DefaultParameterType newDefault) { CARB_ASSERT(m_extendedType == kExtendedAttributeType_Regular, "Extended attribute types cannot set defaults"); m_hasDefaultValue = true; m_defaultValue = newDefault; } /** * Set a default array value after the fact; for types that could have problems with static initialization order * if they tried to do this in the constructor (e.g. NameToken[]) * * @param[in] newDefault Reference to the default value of the attribute's data type */ void setDefault(DefaultParameterType newDefault, size_t elementCount) { CARB_ASSERT(m_extendedType == kExtendedAttributeType_Regular, "Extended attribute types cannot set defaults"); if (m_hasDefaultValue && m_elementCount > 0 && m_defaultValue) { delete [] m_defaultValue; } m_hasDefaultValue = true; copyArrayValue<DefaultParameterType, DefaultMemberType>(newDefault, m_defaultValue, elementCount); m_elementCount = elementCount; } /** * Initializer that looks up the attribute name's token; must be done before using it. * This variation is for input attributes. * * @param[in] iNodeType Node type interface to which the attribute will be added as an input * @param[in] nodeTypeObj Node to which the attribute will be added as an input * @param[in] enable Template metaprogrammed value that enables this version of the method when the attribute is an input */ template <ogn::eAttributeType enabled = attributeType> void addToNodeType(const INodeType& iNodeType, const NodeTypeObj& nodeTypeObj, typename std::enable_if<enabled == ogn::kOgnInput>::type* = 0) { if (m_extendedType == kExtendedAttributeType_Regular) { iNodeType.addInput(nodeTypeObj, m_name, m_dataType, m_isRequired, m_hasDefaultValue ? &m_defaultValue : nullptr, m_hasElementCount ? 
 &m_elementCount : nullptr);
        }
        else
        {
            iNodeType.addExtendedInput(nodeTypeObj, m_name, m_dataType, m_isRequired, m_extendedType);
        }
    }

    /**
     * Initializer that looks up the attribute name's token; must be done before using it.
     * This variation is for output attributes.
     *
     * @param[in] iNodeType Node type interface to which the attribute will be added as an output
     * @param[in] nodeTypeObj Node to which the attribute will be added as an output
     * @param[in] enable Template metaprogrammed value that enables this version of the method when the attribute is an output
     */
    template <ogn::eAttributeType enabled = attributeType>
    void addToNodeType(const INodeType& iNodeType,
                       const NodeTypeObj& nodeTypeObj,
                       typename std::enable_if<enabled == ogn::kOgnOutput>::type* = 0)
    {
        if (m_extendedType == kExtendedAttributeType_Regular)
        {
            // Default value and element count are only forwarded when they were explicitly set
            iNodeType.addOutput(nodeTypeObj, m_name, m_dataType, m_isRequired,
                                m_hasDefaultValue ? &m_defaultValue : nullptr,
                                m_hasElementCount ? &m_elementCount : nullptr);
        }
        else
        {
            iNodeType.addExtendedOutput(nodeTypeObj, m_name, m_dataType, m_isRequired, m_extendedType);
        }
    }

    /**
     * Initializer that looks up the attribute name's token; must be done before using it.
     * This variation is for state attributes.
     *
     * @param[in] iNodeType Node type interface to which the attribute will be added as a state
     * @param[in] nodeTypeObj Node to which the attribute will be added as a state
     * @param[in] enable Template metaprogrammed value that enables this version of the method when the attribute is a state
     */
    template <ogn::eAttributeType enabled = attributeType>
    void addToNodeType(const INodeType& iNodeType,
                       const NodeTypeObj& nodeTypeObj,
                       typename std::enable_if<enabled == ogn::kOgnState>::type* = 0)
    {
        if (m_extendedType == kExtendedAttributeType_Regular)
        {
            // Default value and element count are only forwarded when they were explicitly set
            iNodeType.addState(nodeTypeObj, m_name, m_dataType, m_isRequired,
                               m_hasDefaultValue ? &m_defaultValue : nullptr,
                               m_hasElementCount ? &m_elementCount : nullptr);
        }
        else
        {
            iNodeType.addExtendedState(nodeTypeObj, m_name, m_dataType, m_isRequired, m_extendedType);
        }
    }

    /**
     * Initialization for the attribute on the node type, happens once for any given attribute.
     *
     * @param[in] iToken Attribute's token, for lookup
     * @param[in] iNodeType Node type interface to which the attribute will be added
     * @param[in] nodeTypeObj Node to which the attribute will be added
     */
    void initialize(fabric::IToken& iToken, const INodeType& iNodeType, const NodeTypeObj& nodeTypeObj)
    {
        // The token lookup cannot happen safely at static initialization time so it is deferred to here
        m_token = iToken.getHandle(m_name);
        addToNodeType(iNodeType, nodeTypeObj);
    }
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/Registration.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/Framework.h> #include <carb/logging/Log.h> #include <omni/graph/core/ogn/OmniGraphNodeABI.h> #include <functional> // ================================================================================================================= // This file contains helpers for facilitating automatic OGN node registration/deregistration. // // DECLARE_OGN_NODES() Added to your PluginInterface.cpp declarations where a static initializer would go // INITIALIZE_OGN_NODES() Added to your carbOnPluginStartup() or IExtensionHooks::onStartup_abi function // RELEASE_OGN_NODES() Added to your carbOnPluginShutdown() or IExtensionHooks::onShutdown_abi function // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= namespace omni { namespace graph { namespace core { namespace ogn { // ============================================================================================================== // This class has to be inlined or it would break ABI requirements. It is restricted to being instantiated and used // within a single extension to enforce that. 
class NodeTypeBootstrap
{
    const char* m_nodeTypeName{ nullptr };  //!< Name of the node type being bootstrapped
    int m_version{ 1 };                     //!< Version of the node type being bootstrapped
    const char* m_extensionName{ nullptr }; //!< Name of the extension that owns the node type

public:
    //! Constructor; defined out of line below because it needs the manifest accessor declared after this class
    NodeTypeBootstrap(const char* nodeTypeName, int version, const char* extensionName);

    //! @return The ABI wrapper for the node type; supplied by the NodeTypeBootstrapImpl subclass
    virtual NodeTypeABI& getABI() = 0;

    // NOTE(review): no virtual destructor is declared here. That appears safe only because these
    // objects are statically allocated by the registration macros and are never deleted through a
    // base pointer - confirm before polymorphically deleting one. (Adding a virtual destructor
    // would also alter the ABI this comment block warns about.)

    //! Register the node type with the given registry by forwarding to the ABI wrapper
    void registerNodeType(IGraphRegistry& iGraphRegistry)
    {
        getABI().registerNodeType(iGraphRegistry);
    }
    //! Deregister the node type from the given registry by forwarding to the ABI wrapper
    void deregisterNodeType(IGraphRegistry& iGraphRegistry)
    {
        getABI().deregisterNodeType(iGraphRegistry);
    }
};

// ==============================================================================================================
//
// The registration/deregistration process code flow:
//
// DECLARE_OGN_NODES() establishes a per-plugin manifest of registration records for its extension
// REGISTER_OGN_NODE(), added for each node type implementation, adds that node type's information to the manifest
// INITIALIZE_OGN_NODES() walks every node type in the manifest and registers it with OmniGraph
// RELEASE_OGN_NODES() walks every node type in the manifest and deregisters it with OmniGraph
//
// The code is set up to allow multiple calls to INITIALIZE_OGN_NODES()/RELEASE_OGN_NODES() within a plugin, however
// the typical case will only do those on startup and shutdown.
//
// ==============================================================================================================

// ==============================================================================================================
// This class has to be inlined or it would break ABI requirements. It is restricted to being instantiated and used
// within a single extension to enforce that.
template <typename NodeTypeClass, typename NodeTypeDataClass> class NodeTypeBootstrapImpl : public NodeTypeBootstrap { OmniGraphNode_ABI<NodeTypeClass, NodeTypeDataClass> m_nodeTypeABI; public: NodeTypeBootstrapImpl(const char* nodeTypeName, int version, const char* extensionName) : NodeTypeBootstrap(nodeTypeName, version, extensionName) , m_nodeTypeABI(nodeTypeName, version, extensionName) {} NodeTypeABI& getABI() { return m_nodeTypeABI; } }; // ============================================================================================================== class NodeTypeManifest { std::vector<NodeTypeBootstrap*> m_registrationList; //< List of node type information used for (de)registration // Returns the interface to the registry, potentially using a supplied one (usually from omni.graph.core where // the usual method of getting a cached interface is not available) IGraphRegistry* getRegistryInterface(IGraphRegistry* potentialInterface, char const* processUsingIt) const { if (potentialInterface) { return potentialInterface; } auto iGraphRegistry = carb::getCachedInterface<omni::graph::core::IGraphRegistry>(); if (iGraphRegistry) { return iGraphRegistry; } // No interface is okay when there is no registration list if (m_registrationList.empty()) { return iGraphRegistry; } // This used to be an error, but it seems to get called on shutdown for some reason, where acquisition of // the registry interface is not possible, so log a message for history but do not fail, relying on the // caller to check for a nullptr return. CARB_LOG_INFO_ONCE("Failed to %s - could not acquire omni::graph::core::IGraphRegistry", processUsingIt); return nullptr; } public: // Add a new node type record to the manifest. 
Should happen as a result of the REGISTER_OGN_NODE() macro void add(NodeTypeBootstrap* newRecord) { m_registrationList.push_back(newRecord); } // Register void registerAll(IGraphRegistry* iGraphRegistry = nullptr) { iGraphRegistry = getRegistryInterface(iGraphRegistry, "register node types"); if (iGraphRegistry) { for (const auto& registration : m_registrationList) { if (registration) { registration->registerNodeType(*iGraphRegistry); } else { CARB_LOG_ERROR_ONCE("Empty registration entry found when registering all node types"); } } } } void deregisterAll(IGraphRegistry* iGraphRegistry = nullptr) { iGraphRegistry = getRegistryInterface(iGraphRegistry, "register node types"); if (iGraphRegistry) { for (const auto& registration : m_registrationList) { if (registration) { registration->deregisterNodeType(*iGraphRegistry); } else { CARB_LOG_ERROR_ONCE("Empty registration entry found when deregistering all node types"); } } } } }; } // namespace ogn } // namespace core } // namespace graph } // namespace omni // ====================================================================== /** * In order to support delayed registration per-plugin each plugin must contain a list of * registration functions to be called on startup. To that end the first macro is instantiated * in the plugin interface file at the file-static level and the second is instantiated * inside the carbOnPluginStartup function. No instantiation is needed in the carbOnPluginShutdown * function as the registration object will take care of cleanup. The access is made through * a function call with a static member to prevent static initialization ordering problems. 
 */
extern omni::graph::core::ogn::NodeTypeManifest& __nodeTypeManifest();

// This has to be separate from the class declaration since it would otherwise create a circular dependency between
// NodeTypeManifest and NodeTypeBootstrap
inline omni::graph::core::ogn::NodeTypeBootstrap::NodeTypeBootstrap(const char* nodeTypeName,
                                                                    int version,
                                                                    const char* extensionName)
    : m_nodeTypeName{nodeTypeName}
    , m_version(version)
    , m_extensionName(extensionName)
{
    // Self-registration: constructing a bootstrap object records it in the per-plugin manifest so
    // that INITIALIZE_OGN_NODES()/RELEASE_OGN_NODES() can find it later
    __nodeTypeManifest().add(this);
}

// This is included in the plugin definition file in the file-static declaration code block.
// The function-local static avoids static initialization ordering problems among translation units.
#define DECLARE_OGN_NODES()                                                                                            \
    omni::graph::core::ogn::NodeTypeManifest& __nodeTypeManifest()                                                     \
    {                                                                                                                  \
        static omni::graph::core::ogn::NodeTypeManifest _nodeTypeManifest;                                             \
        return _nodeTypeManifest;                                                                                      \
    }

// This is added to the plugin startup to register all (C++) node types owned by the plugin
#define INITIALIZE_OGN_NODES() __nodeTypeManifest().registerAll();

// This should be added to the plugin shutdown to ensure that all node types owned by the plugin are cleanly shut down
#define RELEASE_OGN_NODES() __nodeTypeManifest().deregisterAll();

// ==============================================================================================================
/* Everything below here was used on previous versions of generated code and should no longer be accessed.
   They only continue to exist here on the off chance that somebody was using them directly.
   --------------------------------  D E P R E C A T E D  --------------------------------
*/

namespace omni
{
namespace graph
{
namespace core
{
namespace ogn
{

// ======================================================================
// Define the type used for remembering the function to call for a delayed registration of a node type
using DelayedRegistrationFn =
    std::function<const char*(OmniGraphNodeRegisterFn&, OmniGraphNodeDeregisterFn&, OmniGraphNodeRegisterAliasFn&)>;
// Function type used to queue up one delayed registration callback for processing at plugin startup
using DelayRegistrationFn = std::function<void(DelayedRegistrationFn)>;

// ======================================================================
/**
 * Template for a class that manages registration of a node defined outside the omni.graph.core extension
 * The OmniGraph interface is assumed to be available as any extension registering nodes must have a dependency on it.
 * One of these objects must be statically instantiated for the node type registration to be properly managed.
 */
template <typename NodeTypeClass, typename NodeTypeDataClass>
class RegisterNode : OmniGraphNode_ABI<NodeTypeClass, NodeTypeDataClass>
{
public:
    /**
     * Construct a registration object and register it for delayed registration when the plugin starts up
     *
     * @param[in] nodeTypeName Name of the node type being registered
     * @param[in] nodeTypeVersion Version of the node type being registered
     * @param[in] extensionName Name of the extension owning this node type
     * @param[in] delayRegistration Function to call to queue up a delayed registration in the plugin start-up code
     */
    RegisterNode<NodeTypeClass, NodeTypeDataClass>(const char* nodeTypeName,
                                                   int nodeTypeVersion,
                                                   const char* extensionName,
                                                   DelayRegistrationFn delayRegistration)
        : OmniGraphNode_ABI<NodeTypeClass, NodeTypeDataClass>()
    {
        // std::cout << "DBG: Delayed registration of " << nodeTypeName << " from " << extensionName << std::endl;
        using std::placeholders::_1;
        using std::placeholders::_2;
        using std::placeholders::_3;
        // Bind the full registration call so it can be replayed later with the actual ABI functions
        delayRegistration(std::bind(&RegisterNode<NodeTypeClass, NodeTypeDataClass>::performRegistration, this,
                                    nodeTypeName, nodeTypeVersion, extensionName, _1, _2, _3));
    }

    /**
     * A binding of this function for every node type is added to a delayed registration list, which will be processed
     * when the plugin starts up.
     *
     * @param[in] nodeTypeName Name of the node type being registered
     * @param[in] nodeTypeVersion Version of the node type being registered
     * @param[in] extensionName Name of the extension owning this node type
     * @param[in] registerFn Function to call to register the node type with the OmniGraph
     * @param[in] deregisterFn Function to call to deregister the node type with the OmniGraph
     * @param[in] registerAliasFn Function to call to register an alternate name for the node type
     * @return The name of the node type as registered
     */
    const char* performRegistration(const char* nodeTypeName,
                                    int nodeTypeVersion,
                                    const char* extensionName,
                                    OmniGraphNodeRegisterFn& registerFn,
                                    OmniGraphNodeDeregisterFn& deregisterFn,
                                    OmniGraphNodeRegisterAliasFn& registerAliasFn)
    {
        // std::cout << "--- Registered " << nodeTypeName << " from " << extensionName << std::endl;
        OmniGraphNode_ABI<NodeTypeClass, NodeTypeDataClass>::doRegister(
            nodeTypeName, nodeTypeVersion, extensionName, registerFn, deregisterFn, registerAliasFn);
        return nodeTypeName;
    }
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/ComputeHelpers.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// =================================================================================================================
// This file contains helper functions for building c++ nodes that perform the operations on union runtime attributes
// with lots of different types. These helpers also support array broadcasting. This is especially useful
// for generic utility nodes such as Add, Multiply, etc.
// =================================================================================================================
#ifdef THIS_IS_INCLUDED_IN_THE_DOCUMENTATION

// The following block describes the ogn::compute API and how it is intended to be used.
//
// begin-compute-helpers-interface-description
/*
 * In Python, working with extended union types is easy - dynamic type resolution is incredibly simple, and NumPy
 * handles array broadcasting, where a vector with smaller dimensions will be "repeated" so the inputs are compatible.
 * The compute helpers API aims to provide utilities that make working with C++ union types significantly easier.
 */

// The compute helpers API primarily relies on using generic lambdas for operations.
// The compiler will resolve the input types, allowing us to use the add function for a variety
// of different types (eg: double, float, int)
auto add = [](auto const& a, auto const& b, auto& result) { result = a + b; };

// We can use the ogn::compute::tryCompute function to attempt to apply add() to a variety of different types
// tryCompute will return true if the types are resolved properly, and false if they aren't.
An ogn::compute::InputError
// will be thrown if the types are resolved but the computation is impossible (due to different array sizes, for example)
if (ogn::compute::tryCompute(db.inputs.a().get<int>(), db.inputs.b().get<int>(), db.inputs.result().get<int>(), add))
    return true;
else if (ogn::compute::tryCompute(db.inputs.a().get<float>(), db.inputs.b().get<float>(), db.inputs.result().get<float>(), add))
    return true;
// For arrays, add() will be called with each input in parallel, ie: add(a[i], b[i], result[i]).
else if (ogn::compute::tryCompute(db.inputs.a().get<double[]>(), db.inputs.b().get<double[]>(), db.inputs.result().get<double[]>(), add))
    return true;
// For a mixture of arrays and singulars, the singular will be broadcast: add(a[i], b, result[i])
else if (ogn::compute::tryCompute(db.inputs.a().get<double[]>(), db.inputs.b().get<double>(), db.inputs.result().get<double[]>(), add))
    return true;
else
{
    db.logWarning("Failed to resolve input types");
    return false;
}

/*
 * Sometimes we want to support a mix of arrays and singular values, using broadcasting to match the singular values
 * to the dimensions of the array. For this, we can use ogn::compute::tryComputeWithArrayBroadcasting().
 */

// Assuming a, b, and result all have base type int, tryComputeWithArrayBroadcasting will attempt to resolve each
// input to int or int[]. Then, perform broadcasting as necessary.
if (ogn::compute::tryComputeWithArrayBroadcasting<int>(db.inputs.a(), db.inputs.b(), db.inputs.sum(), add))
    return true;
// Assumes a has base type int, b has base type float, and result has base type float
else if (ogn::compute::tryComputeWithArrayBroadcasting<int, float, float>(db.inputs.a(), db.inputs.b(), db.inputs.sum(), add))
    return true;
// Also supports an arbitrary number of arguments, and a result.
Here is an example with 3 arguments:
else if (ogn::compute::tryComputeWithArrayBroadcasting<int>(db.inputs.a(), db.inputs.b(), db.inputs.c(), db.inputs.sum(), add3))
    return true;
else if (ogn::compute::tryComputeWithArrayBroadcasting<float, int, int, float>(db.inputs.a(), db.inputs.b(), db.inputs.c(), db.inputs.sum(), add3))
    return true;

/*
 * For tuple types, you'll have to change your lambda function to work with c++ fixed size arrays T[N]
 * Your lambda function will need to be specialized for each different N.
 * For this, I recommend using a helper function in your node implementation
 */

// Empty namespace to avoid multiple declarations at linking
namespace
{
// compute helper assuming a scalar base type
template<typename T>
bool tryComputeAssumingType(db_type& db)
{
    auto functor = [](auto const& a, auto const& b, auto& result) { result = a + b; };
    return ogn::compute::tryComputeWithArrayBroadcasting<T>(db.inputs.a(), db.inputs.b(), db.outputs.sum(), functor);
}

// compute helper assuming a tuple base type
template<typename T, size_t N>
bool tryComputeAssumingType(db_type& db)
{
    auto functor = [](auto const& a, auto const& b, auto& result)
    {
        for(size_t i = 0; i < N; i++)
        {
            result[i] = a[i] + b[i];
        }
    };
    return ogn::compute::tryComputeWithArrayBroadcasting<T[N]>(db.inputs.a(), db.inputs.b(), db.outputs.sum(), functor);
}
} // namespace

// ...

if (tryComputeAssumingType<int>(db)) return true; // Calls the scalar helper
else if (tryComputeAssumingType<float>(db)) return true;
else if (tryComputeAssumingType<int, 3>(db)) return true; // Calls the tuple helper
else if (tryComputeAssumingType<float, 3>(db)) return true;

/*
 * You may also want to support adding scalars to tuples. The above code unfortunately won't support that.
 * For Tuple broadcasting, you can use ogn::compute::tryComputeWithTupleBroadcasting. This function will
 * resolve each input to type T, T[], T[N] or T[N][], performing broadcasting as necessary.
*/ template<typename T, size_t N> bool tryComputeAssumingType(db_type& db) { auto functor = [](auto const& a, auto const& b, auto& result) { result = a + b; }; // Perform computation with tuple AND array broadcasting return ogn::compute::tryComputeWithTupleBroadcasting<T, N>(db.inputs.a(), db.inputs.b(), db.outputs.sum(), functor); } // end-compute-helpers-interface-description // #endif #pragma once #include <algorithm> #include <utility> #include <type_traits> #include <string> #include <exception> #include <omni/graph/core/PreUsdInclude.h> #include <pxr/base/gf/traits.h> #include <omni/graph/core/PostUsdInclude.h> #include <omni/graph/core/ogn/RuntimeAttribute.h> #include <omni/graph/core/ogn/SimpleAttribute.h> #include <omni/graph/core/ogn/ArrayAttribute.h> #include <omni/graph/core/ogn/array.h> namespace omni { namespace graph { namespace core { namespace ogn { namespace compute { /* Tuple Attribute type traits */ template<class T> using is_tuple_data = PXR_NS::GfIsGfVec<T>; // Error thrown when inputs are not valid struct InputError : public std::exception { std::string s; InputError(std::string ss) : s(ss) {} ~InputError() throw () {} const char* what() const throw() { return s.c_str(); } }; #include <omni/graph/core/ogn/ComputeHelpersDetails.h> #include <omni/graph/core/ogn/ComputeHelpersDynamicInputsDetails.h> /** ========================= TryCompute utility =========================== */ /** * Syntax: * tryCompute(inputs..., result, functor [, count]) * * Tries to apply the provided functor to the provided * arguments. * The number of inputs is random, and determined by the provided functor. * It is assumed that the "result" argument is a RuntimeAttribute * while inputs can be either RuntimeAttribute or direct POD/const_array<>. * * @param inputs: Random number of inputs * @param result: the last argument for the functor and the destination for the computation * @param functor: The computation to perform. 
Should have signature: (inputs const&..., result&) -> void
 * @param [Optional] count: Activates vectorized compute of 'count' elements
 * @return True if the inputs were resolved properly, false if not
 */
template <typename... Arguments>
inline bool tryCompute(Arguments&&... args)
{
    // VectorizationHelper detects whether a trailing 'count' argument was supplied (its Offset
    // accounts for it when computing how many of the arguments are actual inputs).
    using VH = Private::VectorizationHelper<Arguments...>;
    // The index sequence enumerates only the inputs: total arguments minus the functor and the
    // result (2), minus the optional count (VH::Offset).
    return Private::tryCompute_ReverseHelper(
        std::forward_as_tuple(args...),
        std::make_index_sequence<sizeof...(Arguments) - 2 - VH::Offset>(),
        VH::count(std::forward_as_tuple(args...)));
}

/** ========================= TryComputeArray utility =========================== */
/**
 * Syntax:
 * tryComputeWithArrayBroadcasting<T>(inputs..., result, functor [, count])
 * OR
 * tryComputeWithArrayBroadcasting<TypeIn0, TypeIn1,..., TypeResult>(inputs..., result, functor [, count])
 *
 * Apply the functor to runtime/regular set of attributes, assuming they all have base types T (syntax #1),
 * or the provided set of types (syntax #2).
 * The result is assumed to be a runtime attribute. If it is a singular value, inputs must be as well.
 * If it is an array, each input is either an array or a singular value.
 * IE, this function will attempt to resolve each input to T or T[] etc.
 * Singular values will be broadcast to the length of the largest array in the input.
 * All input arrays must be of the same length, or an ogn::compute::InputError will be thrown
 *
 * @param inputs: Arbitrary number of inputs passed to the functor
 * @param result: the last argument for the functor and the destination for the computation
 * @param functor: The computation to perform. Should have signature: (inputs const&..., result&) -> void
 * @param [Optional] count: Activates vectorized compute of 'count' elements
 * @return True if the inputs were resolved properly, false if not
 */
template <typename... ComputeTypes, typename... Arguments>
inline bool tryComputeWithArrayBroadcasting(Arguments&&... args)
{
    using VH = Private::VectorizationHelper<Arguments...>;

    // Validation of template arguments number: either one shared base type for every attribute,
    // or exactly one type per input/output attribute.
    static_assert(
        sizeof...(ComputeTypes) == 1 /*single type*/ ||
            sizeof...(ComputeTypes) == sizeof...(Arguments) - 1 /*functor*/ - VH::Offset,
        "Wrong number of template arguments provided to tryComputeWithArrayBroadcasting");

    // Make the call; tuple size 1 means "no tuple broadcasting", only array broadcasting.
    return Private::tryComputeWithArrayBroadcasting_ReverseHelper<1, ComputeTypes...>(
        std::forward_as_tuple(args...),
        std::make_index_sequence<sizeof...(Arguments) - 2 /*functor and output*/ - VH::Offset>(),
        VH::count(std::forward_as_tuple(args...)));
}

/** ========================= TryComputeTuple utility =========================== */
// Public interface
// Template argument(s): Single type + tuple components count,
// or Type pack to apply to input/output + tuple components count
// Arguments: input, input, input, ..., output, functor
/**
 * Syntax:
 * #1: tryComputeWithTupleBroadcasting<SIZE, T>(inputs..., result, functor [, count])
 * OR
 * #2: tryComputeWithTupleBroadcasting<SIZE, TypeIn0, TypeIn1,..., TypeResult>(inputs..., result, functor [, count])
 *
 * Apply the functor to runtime/regular set of attributes, assuming they all have base types T (syntax #1),
 * or the provided set of types (syntax #2).
 * The result is assumed to be a runtime attribute.
 * If it is a singular value, inputs must be as well.
 * If it is a tuple[SIZE] value, inputs can be either a singular value or a tuple[SIZE].
 * If it is an array of (tuple) values, each input is either an array of (tuple) value or a singular (tuple) value.
 * IE, function will attempt to resolve each input to T, T[SIZE], T[] or T[SIZE][]
 * Singular values will be broadcast to the length of the largest array in the input.
 * All input arrays must be of the same length, or an ogn::compute::InputError will be thrown
 *
 * @param inputs: Arbitrary number of inputs passed to the functor
 * @param result: the last argument for the functor and the destination for the computation
 * @param functor: The computation to perform. Should have signature: (inputs const&..., result&) -> void
 * @param [Optional] count: Activates vectorized compute of 'count' elements
 * @return True if the inputs were resolved properly, false if not
 */

/** ========================= UPGRADE NOTE / COMPILE ERROR ON EXISTING USAGE =========================== */
// Order of the template arguments has changed, starting in kit 105.
// The tuple count is now the first argument instead of the last one.
//
// If you get here with a compile error, modify your code as follows:
//
//     [OLD]   tryComputeWithTupleBroadcasting<T0, T1, ..., N>(...)
//     [NEW]   tryComputeWithTupleBroadcasting<N, T0, T1, ...>(...)
/** ==================================================================================================== */

template <size_t N, typename... ComputeTypes, typename... Arguments>
inline bool tryComputeWithTupleBroadcasting(Arguments&&... args)
{
    using VH = Private::VectorizationHelper<Arguments...>;

    // Either one shared base type for every attribute, or exactly one type per attribute.
    static_assert(sizeof...(ComputeTypes) == 1 /*single type*/ ||
                      sizeof...(ComputeTypes) == sizeof...(Arguments) - 1 /*functor*/ - VH::Offset,
                  "Wrong number of template arguments provided to tryComputeWithTupleBroadcasting");

    // Same helper as array broadcasting, but with the tuple size N enabling tuple broadcasting too.
    return Private::tryComputeWithArrayBroadcasting_ReverseHelper<N, ComputeTypes...>(
        std::forward_as_tuple(args...),
        std::make_index_sequence<sizeof...(Arguments) - 2 - VH::Offset>(),
        VH::count(std::forward_as_tuple(args...)));
}

/** ========================= TryComputeArrayWithMultipleInputs utility =========================== */
/**
 * Syntax:
 * tryComputeInputsWithArrayBroadcasting<T>(inputArray, result, functor [, count])
 *
 * Apply the functor to an array of runtime attributes, assuming they all have base types T.
 * The result is assumed to be a runtime attribute. If it is a singular value, inputs must be as well.
 * If it is an array, each input is either an array or a singular value.
 * IE, this function will attempt to resolve each input to T or T[] etc.
 * Singular values will be broadcast to the length of the largest array in the input.
 * All input arrays must be of the same length, or an ogn::compute::InputError will be thrown.
 *
 * Note: The functor is an accumulator. It always receives one input and the result value
 * and expects the input to be iteratively added to the result.
 * The result is initialized with the first input before the functor is invoked.
 *
 * For example, to subtract all inputs from the first input the functor can be implemented as follows:
 * auto functor = [](const auto& input, auto& result)
 * {
 *     result = result - input;
 * };
 *
 * @param inputs: An array of runtime input attributes
 * @param result: The last argument for the functor and the destination for the computation
 * @param functor: The computation to perform. Should have signature: (input const&..., result&) -> void
 * @param [Optional] count: Activates vectorized compute of 'count' elements
 * @return True if the inputs were resolved properly, false if not
 */
template <typename ComputeType, typename Functor>
inline bool tryComputeInputsWithArrayBroadcasting(gsl::span<ogn::RuntimeAttribute<ogn::kOgnInput, ogn::kCpu> const> const inputs,
                                                  ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu> result,
                                                  Functor functor,
                                                  size_t count = 1)
{
    // Thin public wrapper: pins the CPU runtime-attribute input type and forwards to the
    // implementation detail.
    using InputType = typename ogn::RuntimeAttribute<ogn::kOgnInput, ogn::kCpu>;
    return Private::tryComputeInputsWithArrayBroadcasting<ComputeType, InputType, Functor>(inputs, result, functor, count);
}

/** ========================= TryComputeTupleWithMultipleInputs utility =========================== */
/**
 * Syntax:
 * tryComputeInputsWithTupleBroadcasting<T>(inputArray, result, functor [, count])
 *
 * Apply the functor to an array of runtime attributes, assuming they all have base types T.
 * The result is assumed to be a runtime attribute.
 * If it is a singular value, inputs must be as well.
 * If it is a tuple[SIZE] value, inputs can be either a singular value or a tuple[SIZE].
 * If it is an array of (tuple) values, each input is either an array of (tuple) value or a singular (tuple) value.
 * IE, function will attempt to resolve each input to T, T[SIZE], T[] or T[SIZE][]
 * Singular values will be broadcast to the length of the largest array in the input.
 * All input arrays must be of the same length, or an ogn::compute::InputError will be thrown.
 *
 * Note: The functor is an accumulator. It always receives one input and the result value
 * and expects the input to be iteratively added to the result.
 * The result is initialized with the first input before the functor is invoked.
 *
 * For example, to subtract all inputs from the first input the functor can be implemented as follows:
 * auto functor = [](const auto& input, auto& result)
 * {
 *     result = result - input;
 * };
 *
 * @param inputs: An array of runtime input attributes
 * @param result: the last argument for the functor and the destination for the computation
 * @param functor: The computation to perform. Should have signature: (input const&..., result&) -> void
 * @param [Optional] count: Activates vectorized compute of 'count' elements
 * @return True if the inputs were resolved properly, false if not
 */
template <size_t TUPLE_SIZE, typename ComputeType, typename Functor>
inline bool tryComputeInputsWithTupleBroadcasting(gsl::span<ogn::RuntimeAttribute<ogn::kOgnInput, ogn::kCpu> const> const inputs,
                                                  ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu> result,
                                                  Functor functor,
                                                  size_t count = 1)
{
    // Thin public wrapper: pins the CPU runtime-attribute input type and forwards to the
    // implementation detail with the compile-time tuple size.
    using InputType = typename ogn::RuntimeAttribute<ogn::kOgnInput, ogn::kCpu>;
    return Private::tryComputeInputsWithTupleBroadcasting<TUPLE_SIZE, ComputeType, InputType, Functor>(inputs, result, functor, count);
}

} // namespace compute
} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/Database.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/InterfaceUtils.h> #include <omni/fabric/IToken.h> #include <carb/logging/Log.h> #include <omni/graph/core/IAttributeType.h> #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/IVariable2.h> #include <omni/graph/core/StringUtils.h> #include <omni/graph/core/ogn/RuntimeAttribute.h> #include <omni/graph/core/ogn/SimpleRuntimeAttribute.h> using omni::fabric::IToken; // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // OmniGraphDatabase Base class for generated node database classes. Provides common functionality. // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= // Helper definitions for hardcoded metadata names. 
// These should match the Python constants in the MetadataKey object found in the file // source/extensions/omni.graph.tools/python/node_generator/keys.py #define kOgnMetadataAllowMultiInputs "allowMultiInputs" #define kOgnMetadataAllowedTokens "allowedTokens" #define kOgnMetadataAllowedTokensRaw "__allowedTokens" #define kOgnMetadataCategories "__categories" #define kOgnMetadataCategoryDescriptions "__categoryDescriptions" #define kOgnMetadataCudaPointers "__cudaPointers" #define kOgnMetadataDefault "__default" #define kOgnMetadataDescription "__description" #define kOgnMetadataExclusions "__exclusions" #define kOgnMetadataExtension "__extension" #define kOgnMetadataHidden "hidden" #define kOgnMetadataIconBackgroundColor "__iconBackgroundColor" #define kOgnMetadataIconBorderColor "__iconBorderColor" #define kOgnMetadataIconColor "__iconColor" #define kOgnMetadataIconPath "__icon" #define kOgnMetadataInternal "internal" #define kOgnMetadataLanguage "__language" #define kOgnMetadataMemoryType "__memoryType" #define kOgnMetadataObjectId "__objectId" #define kOgnMetadataOptional "__optional" #define kOgnMetadataOutputOnly "outputOnly" #define kOgnMetadataLiteralOnly "literalOnly" #define kOgnMetadataTags "tags" #define kOgnMetadataTokens "__tokens" #define kOgnMetadataUiName "uiName" #define kOgnMetadataUiType "uiType" #define kOgnSingletonName "singleton" namespace omni { namespace graph { namespace core { class Node; namespace ogn { // The following type aliases are internal and are meant to be used as opaque types. // The underlying type definition can change in future releases. 
/** Type of accessor returned for simple input attributes */
using InputAttribute = ogn::RuntimeAttribute<ogn::kOgnInput, ogn::kCpu>;
/** Type of accessor returned for simple output attributes */
using OutputAttribute = ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu>;
/** Type of accessor returned for simple state attributes */
using VariableAttribute = RuntimeAttribute<ogn::kOgnState, ogn::kCpu>;
/** Type of accessor returned for simple dynamic input attributes */
using DynamicInput = ogn::SimpleInput<const InputAttribute, ogn::kCpu>;
/** Type of accessor returned for simple dynamic output attributes */
using DynamicOutput = ogn::SimpleOutput<OutputAttribute, ogn::kCpu>;
/** Type of accessor returned for simple dynamic state attributes */
using DynamicState = ogn::SimpleState<VariableAttribute, ogn::kCpu>;

// ======================================================================
/**
 * Class defining the minimal amount of shared interface for the generated database interface classes.
 * This class should never be instantiated directly as it is not safe across ABI boundaries. Instead,
 * templated classes will derive from it to create new classes that live entirely on the plugin's side.
 * The template parameter NodeClassType is the node implementation class that makes use of the database.
 * This was necessary to avoid circular dependencies.
*/ class OmniGraphDatabase { protected: GraphContextObj const* m_graphContextHandles{ nullptr }; //!< ABI object referencing the OmniGraph NodeObj const* m_nodeHandles{ nullptr }; //!< List of node handles per-instance InstanceIndex m_offset{ 0 };//!< Instance offset in vectorized context size_t m_handleCounts{ 0 }; //!< Number of handles in m_nodeHandles public: /** * @brief Common constructor for a node's database * * @param[in] graphContexts List of ABI objects referencing the graphs in which the node lives * @param[in] nodeObjects List of ABI objects referencing the instanced nodes to which this database belongs * @param[in] handleCount Number of node instances in @p nodeObjects */ OmniGraphDatabase(GraphContextObj const* graphContexts, NodeObj const* nodeObjects, size_t handleCount) : m_graphContextHandles(graphContexts), m_nodeHandles(nodeObjects), m_handleCounts(handleCount) { } /** * @brief Get the attribute Type from the OGN type name * * @param[in] typeNameToken Token with the string representation of the attribute type used by OGN * @returns Attribute type corresponding to the type name, invalid if the type name could not be parsed */ Type typeFromName(NameToken typeNameToken) const { auto typeInterface = carb::getCachedInterface<omni::graph::core::IAttributeType>(); if (!typeInterface) { CARB_LOG_ERROR_ONCE("Could not acquire the IAttributeType interface"); return {}; } auto typeName = tokenToString(typeNameToken); return typeInterface->typeFromOgnTypeName(typeName); } /** * @brief Access the token interface to convert a string to a token, for dealing with unique managed strings * * @param[in] tokenName Name of the string representing a token * @return Token corresponding to the unique string (for fast comparison) */ NameToken stringToToken(const char* tokenName) const { auto tokenInterface = carb::getCachedInterface<omni::fabric::IToken>(); if (!tokenInterface) { CARB_LOG_ERROR_ONCE("Failed to initialize node type - no token interface"); return 
omni::fabric::kUninitializedToken; } return tokenInterface->getHandle(tokenName); } /** * @brief Access the token interface to convert a token to a string, for dealing with unique managed strings * * @param[in] token Name of the token representing a unique string * @return String corresponding to the token */ const char* tokenToString(NameToken token) const { auto tokenInterface = carb::getCachedInterface<omni::fabric::IToken>(); if (!tokenInterface) { CARB_LOG_ERROR_ONCE("Failed to initialize node type - no token interface"); return nullptr; } return tokenInterface->getText(token); } /** * @brief Access the path interface to convert a string to a path * * @param[in] pathString Name of the string representing a token * @return Path corresponding to the string */ TargetPath stringToPath(const char* pathString) const { auto pathInterface = carb::getCachedInterface<omni::fabric::IPath>(); if (!pathInterface) { CARB_LOG_ERROR_ONCE("Failed to initialize node type - no path interface"); return omni::fabric::kUninitializedPath; } return pathInterface->getHandle(pathString); } /** * @brief Access the path interface to convert a path to a string * * @param[in] path Input path * @return String corresponding to the path */ const char* pathToString(const TargetPath path) const { auto pathInterface = carb::getCachedInterface<omni::fabric::IPath>(); if (!pathInterface) { CARB_LOG_ERROR_ONCE("Failed to initialize node type - no path interface"); return nullptr; } return pathInterface->getText(path); } /** * @brief Access the path interface to convert a path to a token * * @param[in] path Input path * @return Token corresponding to the path */ NameToken pathToToken(const TargetPath path) const { auto pathInterface = carb::getCachedInterface<omni::fabric::IPath>(); auto tokenInterface = carb::getCachedInterface<omni::fabric::IToken>(); if (!pathInterface || !tokenInterface) { CARB_LOG_ERROR_ONCE("Failed to initialize node type - no path or token interface"); return 
omni::fabric::kUninitializedToken; } return tokenInterface->getHandle(pathInterface->getText(path)); } /** * @brief Access the path interface to convert a token to a path * * @param[in] pathString Token of the path * @return Path corresponding to the token */ TargetPath tokenToPath(const NameToken pathString) const { auto pathInterface = carb::getCachedInterface<omni::fabric::IPath>(); auto tokenInterface = carb::getCachedInterface<omni::fabric::IToken>(); if (!pathInterface || !tokenInterface) { CARB_LOG_ERROR_ONCE("Failed to initialize node type - no path or token interface"); return omni::fabric::kUninitializedPath; } return pathInterface->getHandle(tokenInterface->getText(pathString)); } /** * @brief Access the raw graph ABI object, for when you have to do something really unusual * * @return ABI object referencing the graph in which the node lives */ const GraphContextObj& abi_context(InstanceIndex relativeIdx = { 0 }) const { InstanceIndex idx = m_offset + relativeIdx; if (idx.index < m_handleCounts) return m_graphContextHandles[idx.index]; return m_graphContextHandles[0]; } /** * @brief Access the raw node ABI object, for when you have to do something really unusual * * @return ABI object referencing the node to which this database belongs */ const NodeObj& abi_node(InstanceIndex relativeIdx = { 0 }) const { InstanceIndex idx = m_offset + relativeIdx; if (idx.index < m_handleCounts) return m_nodeHandles[idx.index]; return m_nodeHandles[0]; } /** * @brief Access the user data, cast to the templated type. * * There is no guarantee that the existing user data is of the requested type - that is up to the * caller to ensure. 
* * @tparam UserDataType Type of the user data * @param[in] relativeIdx Instance index of the node to which the user data applies * @return Internal user data that was set elsewhere, cast to the templated type */ template <typename UserDataType> UserDataType* userData(InstanceIndex relativeIdx = { 0 }) const { NodeObj const& obj = abi_node(relativeIdx); return reinterpret_cast<UserDataType*>(obj.iNode->getUserData(obj)); } /** * @brief Log an evaluation error, warning, or message. * Takes a printf-style list of arguments indicating what happened during evaluation and logs the message. * * @tparam Args... Variable list of printf-like arguments to be formatted * @param[in] nodeObj Node reporting the error * @param[in] inst The graph instance for which this message should be logged * @param[in] sev Severity of the message to report * @param[in] fmt Formatting string * @param[in] args Variadic template arguments in the same form as printf arguments. */ template <typename... Args> static void logMessage(NodeObj const& nodeObj, InstanceIndex inst, Severity sev, const char* fmt, Args&&... args) { if (sizeof...(args) == 0) { nodeObj.iNode->logComputeMessageOnInstance(nodeObj, inst, sev, fmt); } else { std::string msg = formatString(fmt, args...); nodeObj.iNode->logComputeMessageOnInstance(nodeObj, inst, sev, msg.c_str()); } } /** * @brief Log an evaluation error. * Takes a printf-style list of arguments indicating what error during evaluation and logs the message. * * @tparam Args... Variable list of printf-like arguments to be formatted * @param[in] nodeObj Node reporting the error * @param[in] fmt Formatting string * @param[in] args Variadic template arguments in the same form as printf arguments. */ template <typename... Args> static void logError(NodeObj const& nodeObj, const char* fmt, Args&&... args) { logMessage(nodeObj, kAccordingToContextIndex, Severity::eError, fmt, args...); } /** * @brief Log an evaluation error for a specific node instance. 
* Takes a printf-style list of arguments indicating what error during evaluation and logs the message. * * @tparam Args... Variable list of printf-like arguments to be formatted * @param[in] nodeObj Node reporting the error * @param[in] inst The graph instance for which this message should be logged * @param[in] fmt Formatting string * @param[in] args Variadic template arguments in the same form as printf arguments. */ template <typename... Args> static void logError(NodeObj const& nodeObj, InstanceIndex inst, const char* fmt, Args&&... args) { logMessage(nodeObj, inst, Severity::eError, fmt, args...); } /** * @brief Log an evaluation warning. * Takes a printf-style list of arguments indicating what warning during evaluation and logs the message. * * @tparam Args... Variable list of printf-like arguments to be formatted * @param[in] nodeObj Node reporting the warning * @param[in] fmt Formatting string * @param[in] args Variadic template arguments in the same form as printf arguments. */ template <typename... Args> static void logWarning(NodeObj const& nodeObj, const char* fmt, Args&&... args) { logMessage(nodeObj, kAccordingToContextIndex, Severity::eWarning, fmt, args...); } /** * @brief Log an evaluation warning for a specific node instance. * Takes a printf-style list of arguments indicating what warning during evaluation and logs the message. * * @tparam Args... Variable list of printf-like arguments to be formatted * @param[in] nodeObj Node reporting the warning * @param[in] inst The graph instance for which this message should be logged * @param[in] fmt Formatting string * @param[in] args Variadic template arguments in the same form as printf arguments. */ template <typename... Args> static void logWarning(NodeObj const& nodeObj, InstanceIndex inst, const char* fmt, Args&&... args) { logMessage(nodeObj, inst, Severity::eWarning, fmt, args...); } /** * @brief Log an error using print-like formatting * * @tparam Args... 
Variable list of printf-like arguments to be formatted * @param[in] fmt Printf-like formatting string * @param[in] args Variable list of printf-like arguments to be formatted */ template <typename... Args> void logError(const char* fmt, Args&&... args) { logError(abi_node(), m_offset, fmt, args...); } /** * @brief Log an error for a specific node instance using print-like formatting * * @tparam Args... Variable list of printf-like arguments to be formatted * @param[in] relativeIdx The graph instance for which this message should be logged * @param[in] fmt Printf-like formatting string * @param[in] args Variable list of printf-like arguments to be formatted */ template <typename... Args> void logError(InstanceIndex relativeIdx, const char* fmt, Args&&... args) { logError(abi_node(relativeIdx), m_offset + relativeIdx, fmt, args...); } /** * @brief Log a warning using print-like formatting * * @tparam Args... Variable list of printf-like arguments to be formatted * @param[in] fmt Printf-like formatting string * @param[in] args Variable list of printf-like arguments to be formatted */ template <typename... Args> void logWarning(const char* fmt, Args&&... args) { logWarning(abi_node(), m_offset, fmt, args...); } /** * @brief Log a warning for a specific node instance using print-like formatting * * @tparam Args... Variable list of printf-like arguments to be formatted * @param[in] relativeIdx The graph instance for which this message should be logged * @param[in] fmt Printf-like formatting string * @param[in] args Variable list of printf-like arguments to be formatted */ template <typename... Args> void logWarning(InstanceIndex relativeIdx, const char* fmt, Args&&... args) { logWarning(abi_node(relativeIdx), m_offset + relativeIdx, fmt, args...); } /** * Retrieves an attribute representing the value of a variable. If the variable * with the given name does not exist in the graph, an invalid attribute is returned. 
* * @param[in] token: A token representing the variable name * @param[in] relativeIdx: The instance index/offset relative to the one currently pointed by the database * @return An attribute that allows access to the variable data. */ VariableAttribute getVariable(NameToken token, InstanceIndex relativeIdx = { 0 }) { return getVariable(tokenToString(token), relativeIdx); } /** * Retrieves an attribute representing the value of a variable. If the variable * with the given name does not exist in the graph, an invalid attribute is returned. * * @param[in] variableName: A string indicating the variable name * @param[in] relativeIdx: The instance index/offset relative to the one currently pointed by the database * @return An attribute that allows access to the variable data. */ VariableAttribute getVariable(const char* variableName, InstanceIndex relativeIdx = { 0 }) { NodeObj const& obj = abi_node(relativeIdx); GraphContextObj const& ctx = abi_context(relativeIdx); auto graphObj = obj.iNode->getGraph(obj); auto variable = graphObj.iGraph->findVariable(graphObj, variableName); if (!variable) return VariableAttribute(); auto handle = ctx.iContext->getVariableDataHandle(ctx, variable, m_offset + relativeIdx); return VariableAttribute(ctx, handle); } /** * Retrieves the target of the executing graph. * * @param[in] relativeIdx: The instance index/offset relative to the one currently pointed by the database * @return A token representing the prim path of the graph target */ NameToken getGraphTarget(InstanceIndex relativeIdx = { 0 }) const { GraphContextObj const& ctx = abi_context(relativeIdx); return ctx.iContext->getGraphTarget(ctx, relativeIdx+m_offset); } /** * Retrieves the targets of the executing graph. 
 *
 * @param[in] count: The number of instances available for vectorized access
 * @return A span of tokens representing the prim paths of the graph targets
 */
gsl::span<NameToken const> getGraphTargets(size_t count) const
{
    GraphContextObj const& ctx = abi_context();
    // NOTE(review): this takes the address of the token returned for the current instance and spans
    // `count` consecutive entries — it assumes the ABI returns a reference into contiguous
    // per-instance token storage. TODO confirm against the getGraphTarget ABI contract.
    return { &ctx.iContext->getGraphTarget(ctx, m_offset), count };
}

/**
 * Move to the next instance in a vectorized context.
 */
inline void moveToNextInstance()
{
    ++m_offset.index;
}

/**
 * Reset back to the first instance in a vectorized context.
 */
inline void resetToFirstInstance()
{
    m_offset = { 0 };
}

/**
 * Get the current instance index, relative to the one pointed to by the context.
 */
inline const InstanceIndex& getInstanceIndex() const
{
    return m_offset;
}

protected:
/**
 * Gets the dynamic attributes of the specified type, if any are registered with the node.
 *
 * @param[in] staticAttributeCount: The number of attributes that are statically defined in the ogn node definition.
 * @param[out] dynamicAttributes: The buffer for the collected dynamic attributes.
 * @return Returns true if any dynamic attributes of the requested type are found, otherwise returns false.
*/ template <AttributePortType portType, typename TAttribute> bool tryGetDynamicAttributes(size_t staticAttributeCount, std::vector<TAttribute>& dynamicAttributes) { NodeObj const& obj = abi_node(); GraphContextObj const& ctx = abi_context(); auto totalAttributeCount = obj.iNode->getAttributeCount(obj); if (totalAttributeCount > staticAttributeCount) { dynamicAttributes.reserve(totalAttributeCount - staticAttributeCount); std::vector<AttributeObj> allAttributes(totalAttributeCount); obj.iNode->getAttributes(obj, allAttributes.data(), totalAttributeCount); bool foundAny = false; for (auto const& __a : allAttributes) { if (__a.iAttribute->isDynamic(__a) && __a.iAttribute->getPortType(__a) == portType) { foundAny = true; auto __h = __a.iAttribute->getAttributeDataHandle(__a, kAccordingToContextIndex); dynamicAttributes.emplace_back(m_offset.index); dynamicAttributes.back()().reset(ctx, __h, __a); } } return foundAny; } return false; } /** * @brief Populate an array of dynamic attributes from the node * * @tparam portType Port type from which the dynamic attributes will be pulled * @param staticAttributeCount Number of static attributes on the node * @param dynamicAttributes Array of dynamic attributes, to be populated on return * @return true Dynamic attributes were found and populated * @return false No dynamic attributes were found */ template <AttributePortType portType> bool tryGetDynamicAttributes(size_t staticAttributeCount, std::vector<DynamicInput>& dynamicAttributes) { NodeObj const& obj = abi_node(); GraphContextObj const& ctx = abi_context(); auto totalAttributeCount = obj.iNode->getAttributeCount(obj); if (totalAttributeCount > staticAttributeCount) { dynamicAttributes.reserve(totalAttributeCount - staticAttributeCount); std::vector<AttributeObj> allAttributes(totalAttributeCount); obj.iNode->getAttributes(obj, allAttributes.data(), totalAttributeCount); bool foundAny = false; for (auto const& __a : allAttributes) { if (__a.iAttribute->isDynamic(__a) && 
__a.iAttribute->getPortType(__a) == portType) { foundAny = true; auto __h = __a.iAttribute->getAttributeDataHandle(__a, kAccordingToContextIndex); dynamicAttributes.emplace_back(m_offset.index); const_cast<typename std::remove_const_t<ogn::RuntimeAttribute<ogn::kOgnInput, ogn::kCpu>&>>(dynamicAttributes.back()()) .reset(ctx, __h, __a); } } return foundAny; } return false; } /** * Updates the node database when a dynamic attribute is created. * * @tparam TAttribute The type of the dynamic attribute that was created (DynamicInput/DynamicOutput/DynamicState) * @param[out] dynamicAttributes: The dynamic attributes buffer. * @param[in] attribute: The newly created attribute. */ template<typename TAttribute> void onDynamicAttributeCreated(std::vector<TAttribute>& dynamicAttributes, AttributeObj const& attribute) { auto handle = attribute.iAttribute->getAttributeDataHandle(attribute, kAccordingToContextIndex); dynamicAttributes.emplace_back(m_offset.index); dynamicAttributes.back()().reset(abi_context(), handle, attribute); } /** * Updates the node database when a dynamic input is created. * * @param[out] dynamicInputs: The dynamic attributes buffer. * @param[in] attribute: The newly created attribute. */ void onDynamicInputsCreated(std::vector<DynamicInput>& dynamicInputs, AttributeObj const& attribute) { auto handle = attribute.iAttribute->getAttributeDataHandle(attribute, kAccordingToContextIndex); dynamicInputs.emplace_back(m_offset.index); const_cast<typename std::remove_const_t<ogn::RuntimeAttribute<ogn::kOgnInput, ogn::kCpu>&>>( dynamicInputs.back()()) .reset(abi_context(), handle, attribute); } /** * Updates the node database when a dynamic attribute is removed. * * @tparam TAttribute The type of the dynamic attribute that was created (DynamicInput/DynamicOutput/DynamicState) * @param[out] dynamicAttributes: The dynamic attributes buffer. * @param[in] attribute: The dynamic attribute about to be removed. 
*/ template<typename TAttribute> void onDynamicAttributeRemoved(std::vector<TAttribute>& dynamicAttributes, AttributeObj const& attribute) { auto handle = attribute.iAttribute->getAttributeDataHandle(attribute, kAccordingToContextIndex); for (auto it = dynamicAttributes.begin(); it != dynamicAttributes.end(); ++it) { if ((*it)().abi_handle() == handle) { dynamicAttributes.erase(it); return; } } } /** * Updates the node database when a dynamic attribute is created or removed. * * @param[out] inputs: The buffer of input dynamic attributes. * @param[out] outputs: The buffer of output dynamic attributes. * @param[out] states: The buffer of state dynamic attributes. * @param[in] attribute: The attribute added or removed. * @param[in] isAttributeCreated: If true, the input attribute is newly created, otherwise it will be removed. */ void onDynamicAttributeCreatedOrRemoved(std::vector<DynamicInput>& inputs, std::vector<DynamicOutput>& outputs, std::vector<DynamicState>& states, AttributeObj const& attribute, bool isAttributeCreated) { switch (attribute.iAttribute->getPortType(attribute)) { case AttributePortType::kAttributePortType_Input: { if (isAttributeCreated) { onDynamicInputsCreated(inputs, attribute); } else { onDynamicAttributeRemoved(inputs, attribute); } break; } case AttributePortType::kAttributePortType_Output: { if (isAttributeCreated) { onDynamicAttributeCreated(outputs, attribute); } else { onDynamicAttributeRemoved(outputs, attribute); } break; } case AttributePortType::kAttributePortType_State: { if (isAttributeCreated) { onDynamicAttributeCreated(states, attribute); } else { onDynamicAttributeRemoved(states, attribute); } break; } default: break; } } }; /** * Creates an input RuntimeAttribute from an output RuntimeAttribute. * * When nodes have to chain the result of multiple computations, for example when * accumulating a value in a loop, it is often necessary to treat the output attribute * as an input in the next iteration of the loop. 
Because the runtime attributes are strongly typed * and different depending if they are inputs or outputs, they cannot be used interchangeably as arguments * to functions which compute one iteration of a computation. * * This helper function creates an input RuntimeAttribute from the same handle as the original output attribute. * The raw data pointer of the original attribute and the newly constructed input attribute are the same. * * @tparam MemoryType The location of the memory expected for the attribute * @param[in] db: The node database. * @param[in] output: The original output RuntimeAttribute. * @param[in] outputToken: The original output RuntimeAttribute name token. * * @return Returns a RuntimeAttribute with the template type ogn::kOgnInput and the same handle as the original output attribute. */ template <eMemoryType MemoryType> static inline ogn::RuntimeAttribute<ogn::kOgnInput, MemoryType> constructInputFromOutput( OmniGraphDatabase const& db, ogn::RuntimeAttribute<ogn::kOgnOutput, MemoryType> const& output, NameToken outputToken) { auto const& nodeObj = db.abi_node(); auto const& context = db.abi_context(); auto const& resultAttribute = nodeObj.iNode->getAttributeByToken(nodeObj, outputToken); auto handle = resultAttribute.iAttribute->getConstAttributeDataHandle(resultAttribute, db.getInstanceIndex()); return ogn::RuntimeAttribute<ogn::kOgnInput, MemoryType>(context, handle, output.type()); } } // namespace ogn } // namespace core } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/core/ogn/ComputeHelpersDetails.h
namespace Private
{
/** ========================= Compute utils =========================== */
// Handles array VS single-value arguments/results

// Small helper structure to broadcast an operation over tuple components when asked to (TUPLE_SIZE != 1)
// In that case, the computation will be broadcast to the actual tuple elements
template <size_t TUPLE_SIZE, typename T>
struct TupleHelper
{
    // Plain (non-tuple) value: index directly into the flat data block
    static inline auto& get(T& val, size_t /*tupleIdx*/, size_t idx) { return (&val)[idx]; }
    static inline auto& getConst(T const& val, size_t /*tupleIdx*/, size_t idx) { return (&val)[idx]; }
};

// True tuple: data is laid out as consecutive TUPLE_SIZE-sized groups
template <size_t TUPLE_SIZE, typename T>
struct TupleHelper<TUPLE_SIZE, T[TUPLE_SIZE]>
{
    static inline auto& get(T* val, size_t tupleIdx, size_t idx) { return val[idx * TUPLE_SIZE + tupleIdx]; }
    static inline auto& getConst(T const* val, size_t tupleIdx, size_t idx) { return val[idx * TUPLE_SIZE + tupleIdx]; }
};

// Degenerate case: a "tuple" of one component behaves like a plain value
template <typename T>
struct TupleHelper<1, T>
{
    static inline auto& get(T& val, size_t /*tupleIdx*/, size_t idx) { return (&val)[idx]; }
    static inline auto& getConst(T const& val, size_t /*tupleIdx*/, size_t idx) { return (&val)[idx]; }
};

template <typename T>
struct TupleHelper<1, T[1]>
{
    template <typename U>
    static inline auto& get(U& val, size_t tupleIdx, size_t idx) { return (&val[tupleIdx])[idx]; }
    template <typename U>
    static inline auto& getConst(U const& val, size_t tupleIdx, size_t idx) { return (&val[tupleIdx])[idx]; }
};

// Sentinel length meaning "this argument is not an array"
constexpr size_t kNotAnArray{ size_t(-1) };

// Small helper structure to differentiate array attributes from regular attributes,
// and allow making the proper operation depending on it
template <size_t TUPLE_SIZE, typename T, bool IsArray = is_array<typename remove_const_ref<T>::type>::value>
struct ArrayHelper
{
    static inline auto& accessArg(T& val, size_t /*index*/, size_t tupleIdx, size_t idx)
    {
        return TupleHelper<TUPLE_SIZE, T>::get(val, tupleIdx, idx);
    }
    static inline auto const& accessArgConst(T const& val, size_t /*index*/, size_t tupleIdx, size_t idx)
    {
        return TupleHelper<TUPLE_SIZE, T>::getConst(val, tupleIdx, idx);
    }

    // A non-array result always has a single element per instance
    template <typename... ARGS>
    static constexpr size_t setResultLen(size_t, T&, const ARGS&...) { return 1; }

    static constexpr size_t getArgsLengthAndAdjustHandle(T const&, size_t) { return kNotAnArray; }
};

// Array specialization: arguments are indexed per element, and all array lengths must be reconciled
template <size_t TUPLE_SIZE, typename T_>
struct ArrayHelper<TUPLE_SIZE, T_, true>
{
    using T = typename remove_const_ref<T_>::type;
    using InnerType = typename T::iterator::value_type;

    static inline auto& accessArg(T& val, size_t index, size_t tupleIdx, size_t /*instance*/)
    {
        return TupleHelper<TUPLE_SIZE, InnerType>::get(val[index], tupleIdx, 0);
    }
    static inline auto const& accessArgConst(T const& val, size_t index, size_t tupleIdx, size_t /*instance*/)
    {
        return TupleHelper<TUPLE_SIZE, InnerType>::getConst(val[index], tupleIdx, 0);
    }

    // Resizes the result array to the common length of all array arguments (throws on mismatch)
    template <typename... ARGS>
    static size_t const setResultLen(size_t instance, T& res, const ARGS&... args)
    {
        // Reduce the per-argument lengths to a single common length, rejecting conflicting sizes
        auto comp = [](size_t ret, size_t others)
        {
            if (ret != kNotAnArray)
            {
                if (others != kNotAnArray)
                {
                    if (ret != others)
                    {
                        throw InputError("Unable to broadcast arrays of differing lengths: " + std::to_string(ret) +
                                         "!= " + std::to_string(others));
                    }
                    return std::max(ret, others);
                }
                return ret;
            }
            return others;
        };
        size_t size = fold(comp, ArrayHelper<1, decltype(args)>::getArgsLengthAndAdjustHandle(args, instance)...);
        if (size == kNotAnArray)
            size = 1;
        // The array is already pointing to the first instance;
        // offset it by 1 instance on every other call
        if (instance)
            res.adjustHandle(1);
        res.resize(size);
        return size;
    }

    static size_t const getArgsLengthAndAdjustHandle(T const& arg, size_t instance)
    {
        // The array is already pointing to the first instance;
        // offset it by 1 instance on every other call
        if (instance)
            const_cast<T&>(arg).adjustHandle(1);
        return arg.size();
    }
};

// Perform a broadcast computation, expanding any provided scalar for each array entry
// All provided arrays must have the same length
template <size_t TUPLE_SIZE, typename Result, typename Functor, typename... Args>
void compute(size_t const count, Result& result, Functor const& functor, const Args&... args)
{
    for (size_t idx = 0; idx < count; ++idx)
    {
        size_t const len = ArrayHelper<TUPLE_SIZE, Result>::setResultLen(idx, result, args...);
        for (size_t i = 0; i < len; ++i)
            for (size_t j = 0; j < TUPLE_SIZE; ++j)
                functor(ArrayHelper<TUPLE_SIZE, Args>::accessArgConst(args, i, j, idx)...,
                        ArrayHelper<TUPLE_SIZE, Result>::accessArg(result, i, j, idx));
    }
}

/** ========================= TryCompute utilities =========================== */
// Handles RuntimeAttribute compatibility VS regular attributes

// Helper class to test the validity of, and access the data of, a RuntimeAttribute,
// or just return the data for a regular attribute
template <typename T, bool RTData = is_runtime_data<typename remove_const_ref<T>::type>::value>
struct TryComputeHelper
{
    static bool testValid(T const& val) { return (bool)val; }
    static auto& accessData(T&& val) { return *val; }
};

template <typename T>
struct TryComputeHelper<T, false>
{
    // Regular attributes are always valid and are passed through untouched
    static bool testValid(T const& val) { return true; }
    static T& accessData(T&& val) { return val; }
};

// This function tests the validity of its runtime attributes, and if all are valid,
// forwards the call to do the broadcast computation
template <size_t TUPLE_SIZE, typename Result, typename Functor, typename... Args>
bool tryCompute(size_t count, Result&& result, Functor const& functor, Args&&... args)
{
    if (!TryComputeHelper<Result>::testValid(result) ||
        !(fold(std::logical_and<>(), TryComputeHelper<decltype(args)>::testValid(args)...)))
        return false;

    compute<TUPLE_SIZE>(count, TryComputeHelper<Result>::accessData(std::forward<Result&&>(result)), functor,
                        TryComputeHelper<Args>::accessData(std::forward<Args&&>(args))...);
    return true;
}

/** ========================= TryComputeArray utilities =========================== */
// Some/all args are Runtime, and might be arrays or simple values: this block of functionality selects the
// proper combination of array/not-array for runtime attribs

// Small helper struct that separates RuntimeAttribute from regular attributes,
// and allows retrieving the requested accessors for those Runtime attributes
template <size_t TUPLE_SIZE, typename T>
struct RuntimeAttribHelper
{
    using T_ = typename remove_const_ref<T>::type;
    // the templated type is only used with RT attributes to retrieve the value
    template <typename Useless>
    static T_& accessData(T_& val) { return val; }
    template <typename Useless>
    static T_ const& accessData(T_ const& val) { return val; }

    static bool isArray(T const&) { return false; }
    static bool isTuple(T const& val) { return false; }
};

// Specialization for runtime attributes: array/tuple-ness is decided from the runtime type info
template <size_t TUPLE_SIZE, eAttributeType AttributeType, eMemoryType MemoryType>
struct RuntimeAttribHelper<TUPLE_SIZE, ogn::RuntimeAttribute<AttributeType, MemoryType>>
{
    using RTAttr = ogn::RuntimeAttribute<AttributeType, MemoryType>;
    template <typename T>
    static auto accessData(RTAttr const& val) { return val.template get<T>(); }

    static bool isArray(RTAttr const& val) { return val.type().arrayDepth != 0; }
    static bool isTuple(RTAttr const& val) { return TUPLE_SIZE != 1 && val.type().componentCount == TUPLE_SIZE; }
};

// This struct is a helper that will scan a list of input arguments to determine whether or not each of them is an array.
// This information is collected by recursively calling "callCorrectCompute", and accumulating the resolved types in
// the class template arguments "ResolvedTypes".
// Once the recursive call ends (when all the inputs have been scanned), a specialized version of "callCorrectCompute"
// unpacks the tuple to forward the call to tryCompute (through "makeCall"), with all the appropriate accessors for
// runtime attributes resolved
template <typename... ResolvedTypes>
struct ArgumentTypeSelector
{
    // Recursive step: resolve the array/tuple-ness of the argument at position sizeof...(ResolvedTypes)
    template <size_t TUPLE_SIZE, typename... ComputeTypes, typename Ret, typename Functor, typename Args>
    static bool callCorrectCompute(
        bool const allowArrays, size_t count, Ret&& ret, Functor const& f, Args&& args,
        typename std::enable_if<sizeof...(ResolvedTypes) != sizeof...(ComputeTypes)>::type* _cond = 0)
    {
        using ComputeType = typename std::tuple_element<sizeof...(ResolvedTypes), std::tuple<ComputeTypes...>>::type;
        using ComputeTypeTuple = ComputeType[TUPLE_SIZE];
        auto const& arg = std::get<sizeof...(ResolvedTypes)>(args);
        using RTAH = RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(arg)>::type>;
        if (RTAH::isArray(arg))
        {
            // Array inputs are only legal when the output itself is an array
            if (allowArrays == false)
                return false;

            if (RTAH::isTuple(arg))
                return ArgumentTypeSelector<ResolvedTypes..., ComputeTypeTuple[]>::template callCorrectCompute<
                    TUPLE_SIZE, ComputeTypes...>(allowArrays, count, ret, f, args);

            return ArgumentTypeSelector<ResolvedTypes..., ComputeType[]>::template callCorrectCompute<
                TUPLE_SIZE, ComputeTypes...>(allowArrays, count, ret, f, args);
        }

        if (RTAH::isTuple(arg))
            return ArgumentTypeSelector<ResolvedTypes..., ComputeTypeTuple>::template callCorrectCompute<
                TUPLE_SIZE, ComputeTypes...>(allowArrays, count, ret, f, args);

        return ArgumentTypeSelector<ResolvedTypes..., ComputeType>::template callCorrectCompute<
            TUPLE_SIZE, ComputeTypes...>(allowArrays, count, ret, f, args);
    }

    // Terminal step: every argument has been resolved, perform the actual call
    template <size_t TUPLE_SIZE, typename... ComputeTypes, typename Ret, typename Functor, typename Args>
    static bool callCorrectCompute(
        bool const /*allowArrays*/, size_t count, Ret&& ret, Functor const& f, Args&& args,
        typename std::enable_if<sizeof...(ResolvedTypes) == sizeof...(ComputeTypes)>::type* _cond = 0)
    {
        return makeCall<TUPLE_SIZE>(count, ret, f, args, std::make_index_sequence<sizeof...(ComputeTypes)>());
    }

    // Unpacks the argument tuple and forwards to tryCompute with fully resolved accessors
    template <size_t TUPLE_SIZE, size_t... Indices, typename Ret, typename Functor, typename Args>
    static bool makeCall(size_t count, Ret&& ret, Functor const& f, Args&& args, std::index_sequence<Indices...>)
    {
        using Args_ = typename remove_const_ref<Args>::type;
        return tryCompute<TUPLE_SIZE>(
            count, ret, f,
            RuntimeAttribHelper<TUPLE_SIZE,
                                typename remove_const_ref<typename std::tuple_element<Indices, Args_>::type>::type>::
                template accessData<ResolvedTypes>(std::get<Indices>(args))...);
    }
};

// Main entry point for broadcasting the provided functor to each element of arrays (if arrays are provided)
// In the case of the output not being an array, it is assumed that inputs are not either,
// and this call is just forwarded to the compute function which will check that the inputs have compatible types
// If the output is an array, the call is forwarded to the "ArgumentTypeSelector" helper class, which will scan each
// input argument to determine whether or not it is an array, and perform the final broadcasting call.
// The parameter pack is passed as a std::tuple so it is workable
template <size_t TUPLE_SIZE, typename RetType, typename... ComputeTypes, typename Functor, typename... Args>
bool tryComputeWithArrayBroadcasting(size_t count,
                                     ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu>& result,
                                     Functor const& functor,
                                     const Args&... args)
{
    using RetTypeTuple = RetType[TUPLE_SIZE];
    if (result.type().arrayDepth == 0)
    {
        if (TUPLE_SIZE != 1)
        {
            // Tuple output: the runtime component count must match the compile-time tuple size
            if (result.type().componentCount != TUPLE_SIZE)
                return false;
            return ArgumentTypeSelector<>::callCorrectCompute<TUPLE_SIZE, ComputeTypes...>(
                false, count, result.get<RetTypeTuple>(), functor, std::forward_as_tuple(args...));
        }
        return ArgumentTypeSelector<>::callCorrectCompute<TUPLE_SIZE, ComputeTypes...>(
            false, count, result.get<RetType>(), functor, std::forward_as_tuple(args...));
    }

    if (TUPLE_SIZE != 1)
    {
        if (result.type().componentCount != TUPLE_SIZE)
            return false;
        return ArgumentTypeSelector<>::callCorrectCompute<TUPLE_SIZE, ComputeTypes...>(
            true, count, result.get<RetTypeTuple[]>(), functor, std::forward_as_tuple(args...));
    }
    return ArgumentTypeSelector<>::callCorrectCompute<TUPLE_SIZE, ComputeTypes...>(
        true, count, result.get<RetType[]>(), functor, std::forward_as_tuple(args...));
}

// Meta function that selects a given element in a tuple, or, in the case of a single-element tuple,
// returns this unique element regardless of the index
// @input TUPLE: the tuple on which to operate
// @input IDX: the index at which the element must be extracted
// @private COUNT: private argument that allows specializing for single-element TUPLE input
template <typename TUPLE, size_t IDX, size_t COUNT = std::tuple_size<TUPLE>::value>
struct AccessComputeTypes
{
    using type = typename std::tuple_element<IDX, TUPLE>::type;
};

template <typename TUPLE, size_t IDX>
struct AccessComputeTypes<TUPLE, IDX, 1>
{
    using type = typename std::tuple_element<0, TUPLE>::type;
};

// This function is a pass-through adapter that has 2 purposes:
// #1/ We allow providing all the compute types that will be used to access the inputs/output data,
// but also a unique one, that will be applied to each input/output. This is the purpose of AccessComputeTypes:
// Selectively expand a unique type to a parameter pack duplicating this type the right amount of times,
// or use the parameter pack as provided. The pack provided (containing 1 or n elements) is converted to std::tuple
// in order to be workable
// #2/ Changes the order of arguments, moving the output and the functor to the first arguments,
// allowing to move a random number of inputs as a parameter pack at the end of the function
template <size_t TUPLE_SIZE, typename... ComputeTypes, typename TUPLE, size_t... Inputs>
inline bool tryComputeWithArrayBroadcasting_ReverseHelper(TUPLE&& tuple, std::index_sequence<Inputs...>, size_t count)
{
    using CTTuple = std::tuple<ComputeTypes...>;
    return tryComputeWithArrayBroadcasting<
        TUPLE_SIZE, typename AccessComputeTypes<CTTuple, sizeof...(Inputs)>::type,
        typename AccessComputeTypes<CTTuple, Inputs>::type...>(
        count, std::get<sizeof...(Inputs)>(tuple), std::get<sizeof...(Inputs) + 1>(tuple), std::get<Inputs>(tuple)...);
}

// This function is a pass-through adapter that forwards the arguments as a tuple, and creates a corresponding
// index_sequence. This will allow the ReverseHelper to do its job of modifying the parameter order (see purpose #2 above)
template <typename TUPLE, size_t... Inputs>
inline bool tryCompute_ReverseHelper(TUPLE&& tuple, std::index_sequence<Inputs...>, size_t count)
{
    using RetType = typename std::tuple_element<sizeof...(Inputs), TUPLE>::type;
    return tryCompute<1>(
        count, std::forward<RetType&&>(std::get<sizeof...(Inputs)>(tuple)), std::get<sizeof...(Inputs) + 1>(tuple),
        std::get<Inputs>(tuple)...);
}

// Small helper to detect whether a vectorized compute has been requested or not
// (a trailing integral argument is interpreted as the instance count)
template <typename... Arguments>
struct VectorizationHelper
{
    using Tuple = std::tuple<Arguments...>;
    using LastType = typename std::tuple_element<sizeof...(Arguments) - 1, Tuple>::type;
    static constexpr bool isVectorized = std::is_integral<typename remove_const_ref<LastType>::type>::value;
    static constexpr int Offset = isVectorized ? 1 : 0;

    static size_t count(Tuple&& args)
    {
        return count_helper(std::forward<Tuple>(args), std::integral_constant<bool, isVectorized>());
    }

private:
    static size_t count_helper(Tuple&& args, std::false_type /*isVectorized*/) { return 1; }
    static size_t count_helper(Tuple&& args, std::true_type /*isVectorized*/)
    {
        return std::get<sizeof...(Arguments) - 1>(args);
    }
};

} // namespace Private
omniverse-code/kit/include/omni/graph/core/dirtyid/IDirtyID2.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "IDirtyID1.h"

#include <omni/core/IObject.h>
#include <omni/graph/core/Handle.h>

#include <type_traits>

namespace omni
{
namespace graph
{
namespace core
{
namespace unstable
{

OMNI_DECLARE_INTERFACE(IDirtyID2);

// ====================================================================================================
/*
 _____          _   _       _     _    _
|  __ \        | \ | |     | |   | |  | |
| |  | | ___   |  \| | ___ | |_  | |  | |___  ___
| |  | |/ _ \  | . ` |/ _ \| __| | |  | / __|/ _ \
| |__| | (_) | | |\  | (_) | |_  | |__| \__ \  __/
|_____/ \___/  |_| \_|\___/ \__|  \____/|___/\___|
*/
// The DirtyId interface exposes the implementation details of the change tracking system for bundles.
// This functionality is exposed to allow old interfaces such as BundlePrims to function.
// Eventually this interface is going to be removed, once BundlePrims is fully deprecated.
// ====================================================================================================
class IDirtyID2_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.IDirtyID2")>
{
protected:
    //! @brief Activate or deactivate dirty id tracking for a specific bundle, its attributes and children.
    //! @param handle Bundle whose dirty id tracking is being toggled.
    //! @param isActive Bool flag indicating if tracking is on or off.
    //! @return An omni::core::Result indicating the success of the operation.
    OMNI_ATTR("no_py")
    virtual omni::core::Result setup_abi(BundleHandle handle, bool isActive) noexcept = 0;

    //! @brief Get dirty id values for bundles.
    //!
    //! The output array for dirty id values needs to be at least the size of the input
    //! array of bundle handles.
    //!
    //! @param bundles Input bundles to get dirty ids for.
    //! @param bundleCount Size of input bundles.
    //! @param ids Output dirty id values for requested bundles.
    //!
    //! @returns Success if operation was successful.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getForBundles_abi(
        ConstBundleHandle const* const bundles OMNI_ATTR("in, throw_if_null, count=bundleCount"),
        size_t bundleCount OMNI_ATTR("in"),
        DirtyIDType* const ids OMNI_ATTR("out, throw_if_null, count=bundleCount")) noexcept = 0;

    //! @brief Get dirty id values for attributes.
    //!
    //! The output array for dirty id values needs to be at least the size of the input
    //! array of attribute handles.
    //!
    //! @param attributes Input attributes to get dirty ids for.
    //! @param attributeCount Size of input attributes.
    //! @param ids Output dirty id values for requested attributes.
    //!
    //! @returns Success if operation was successful.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getForAttributes_abi(
        ConstAttributeDataHandle const* const attributes OMNI_ATTR("in, throw_if_null, count=attributeCount"),
        size_t attributeCount OMNI_ATTR("in"),
        DirtyIDType* const ids OMNI_ATTR("out, throw_if_null, count=attributeCount")) noexcept = 0;
};

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni

#include "IDirtyID2.gen.h"

//! Hand-written convenience API layered over the generated IDirtyID2 bindings
OMNI_DEFINE_INTERFACE_API(omni::graph::core::unstable::IDirtyID2)
{
public:
    using DirtyIDType = omni::graph::core::DirtyIDType;
    using BundleHandle = omni::graph::core::BundleHandle;
    using ConstBundleHandle = omni::graph::core::ConstBundleHandle;
    using AttributeDataHandle = omni::graph::core::AttributeDataHandle;
    using ConstAttributeDataHandle = omni::graph::core::ConstAttributeDataHandle;

    //! @return The invalid dirty id value.
    static DirtyIDType getInvalid() noexcept
    {
        return omni::graph::core::kInvalidDirtyID;
    }

    //! @brief Checks if the given dirty id is a valid dirty id.
    static bool isValid(DirtyIDType value) noexcept
    {
        return value != getInvalid();
    }

    //! @brief Get an array of dirty id values for an array of bundles.
    //!
    //! The output array for dirty id values needs to be at least the size of the input
    //! array of bundle handles.
    //!
    //! @param bundles Input array of bundle handles to get dirty ids for.
    //! @param bundleCount Size of input array of bundle handles.
    //! @param ids Output array of dirty id values for requested bundles.
    //!
    //! @returns Success if operation was successful.
    template <typename BUNDLE_HANDLE>
    omni::core::Result getForBundles(BUNDLE_HANDLE* const bundles, size_t bundleCount, DirtyIDType* const ids)
    {
        // Only (Const)BundleHandle may safely be viewed as ConstBundleHandle below.
        using BundleHandle_t = typename std::remove_cv<BUNDLE_HANDLE>::type;
        static_assert(std::is_same<BundleHandle_t, BundleHandle>::value ||
                      std::is_same<BundleHandle_t, ConstBundleHandle>::value);
        return getForBundles_abi(reinterpret_cast<ConstBundleHandle const* const>(bundles), bundleCount, ids);
    }

    //! @brief Get the dirty id value for a bundle.
    //!
    //! @param handle Input bundle handle to get the dirty id for.
    //!
    //! @returns A valid dirty id if success, invalid dirty id if failure.
    DirtyIDType getForBundle(ConstBundleHandle handle)
    {
        DirtyIDType value = getInvalid();
        auto const result = getForBundles_abi(&handle, 1, &value);
        return result == kResultSuccess ? value : getInvalid();
    }

    //! @brief Get an array of dirty id values for an array of attributes.
    //!
    //! The output array for dirty id values needs to be at least the size of the input
    //! array of attribute handles.
    //!
    //! @param attributes Input array of attribute handles to get dirty ids for.
    //! @param attributeCount Size of input array of attribute handles.
    //! @param ids Output array of dirty id values for requested attributes.
    //!
    //! @returns Success if operation was successful.
    template <typename ATTRIBUTE_HANDLE>
    omni::core::Result getForAttributes(ATTRIBUTE_HANDLE* const attributes, size_t attributeCount, DirtyIDType* const ids)
    {
        using AttributeDataHandle_t = typename std::remove_cv<ATTRIBUTE_HANDLE>::type;
        static_assert(std::is_same<AttributeDataHandle_t, AttributeDataHandle>::value ||
                      std::is_same<AttributeDataHandle_t, ConstAttributeDataHandle>::value);
        return getForAttributes_abi(
            reinterpret_cast<ConstAttributeDataHandle const* const>(attributes), attributeCount, ids);
    }

    //! @brief Get the dirty id value for an attribute.
    //!
    //! @param handle Input attribute handle to get the dirty id for.
    //!
    //! @returns A valid dirty id if success, invalid dirty id if failure.
    DirtyIDType getForAttribute(ConstAttributeDataHandle handle)
    {
        DirtyIDType value = getInvalid();
        auto const result = getForAttributes_abi(&handle, 1, &value);
        return result == kResultSuccess ? value : getInvalid();
    }
};
omniverse-code/kit/include/omni/graph/core/dirtyid/IDirtyID1.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ==================================================================================================== /* _____ _ _ _ _ _ | __ \ | \ | | | | | | | | | | | | ___ | \| | ___ | |_ | | | |___ ___ | | | |/ _ \ | . ` |/ _ \| __| | | | / __|/ _ \ | |__| | (_) | | |\ | (_) | |_ | |__| \__ \ __/ |_____/ \___/ |_| \_|\___/ \__| \____/|___/\___| This is a temporary interface that can change at any time. */ // ==================================================================================================== #include <carb/Interface.h> #include <cstdint> namespace omni { namespace graph { namespace core { using DirtyIDType = uint64_t; constexpr DirtyIDType kInvalidDirtyID = ~static_cast<DirtyIDType>(0); struct IDirtyID { CARB_PLUGIN_INTERFACE("omni::graph::core::IDirtyID", 1, 0) /** * @return The next dirty ID, atomically incrementing the counter inside. */ DirtyIDType(CARB_ABI* getNextDirtyID)() = nullptr; }; template <typename PREVIOUS_T> bool checkDirtyIDChanged(PREVIOUS_T& previousID, DirtyIDType newID) { if (newID != previousID) { previousID = newID; return true; } // Equal, but if they're invalid, still treat them as changed return (newID == kInvalidDirtyID); } } } }
omniverse-code/kit/include/omni/graph/core/dirtyid/IDirtyID2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

// NOTE: there is deliberately no include guard -- this header participates in
// the omni.bind two-pass include protocol and is included once for the
// declaration pass (OMNI_BIND_INCLUDE_INTERFACE_DECL) and once for the
// implementation pass (OMNI_BIND_INCLUDE_INTERFACE_IMPL).
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

template <>
class omni::core::Generated<omni::graph::core::unstable::IDirtyID2_abi>
    : public omni::graph::core::unstable::IDirtyID2_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::unstable::IDirtyID2")

    //! @brief Activate or deactivate dirty id tracking for specific bundle on its attributes and children.
    //! @param handle Bundle handle on which to set up dirty id tracking.
    //! @param isActive Bool flag indicating if tracking is on or off.
    //! @return An omni::core::Result indicating the success of the operation.
    omni::core::Result setup(const omni::graph::core::BundleHandle& handle, bool isActive) noexcept;

    //! @brief Get dirty id values for bundles.
    //!
    //! The output array for dirty id values needs to be at least size of input
    //! array of bundle handles.
    //!
    //! @param bundles Input bundles to get dirty ids for.
    //! @param bundleCount Size of input bundles.
    //! @param ids Output dirty id values for requested bundles.
    //!
    //! @returns Success if operation was successful.
    omni::core::Result getForBundles(const omni::graph::core::ConstBundleHandle* const bundles,
                                     size_t bundleCount,
                                     omni::graph::core::DirtyIDType* const ids);

    //! @brief Get dirty id values for attributes.
    //!
    //! The output array for dirty id values needs to be at least size of input
    //! array of attribute handles.
    //!
    //! @param attributes Input attributes to get dirty ids for.
    //! @param attributeCount Size of input attributes.
    //! @param ids Output dirty id values for requested attributes.
    //!
    //! @returns Success if operation was successful.
    omni::core::Result getForAttributes(const omni::graph::core::ConstAttributeDataHandle* const attributes,
                                        size_t attributeCount,
                                        omni::graph::core::DirtyIDType* const ids);
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Thin forwarding wrappers: each public method validates the "throw_if_null"
// arguments declared in the ABI attributes, then forwards to the *_abi slot.
inline omni::core::Result omni::core::Generated<omni::graph::core::unstable::IDirtyID2_abi>::setup(
    const omni::graph::core::BundleHandle& handle, bool isActive) noexcept
{
    return setup_abi(handle, isActive);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::unstable::IDirtyID2_abi>::getForBundles(
    const omni::graph::core::ConstBundleHandle* const bundles, size_t bundleCount, omni::graph::core::DirtyIDType* const ids)
{
    OMNI_THROW_IF_ARG_NULL(bundles);
    OMNI_THROW_IF_ARG_NULL(ids);
    auto return_ = getForBundles_abi(bundles, bundleCount, ids);
    return return_;
}

inline omni::core::Result omni::core::Generated<omni::graph::core::unstable::IDirtyID2_abi>::getForAttributes(
    const omni::graph::core::ConstAttributeDataHandle* const attributes,
    size_t attributeCount,
    omni::graph::core::DirtyIDType* const ids)
{
    OMNI_THROW_IF_ARG_NULL(attributes);
    OMNI_THROW_IF_ARG_NULL(ids);
    auto return_ = getForAttributes_abi(attributes, attributeCount, ids);
    return return_;
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/dirtyid/PyIDirtyID2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindIDirtyID2(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::core::unstable::IDirtyID2_abi>, omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::unstable::IDirtyID2_abi>>, omni::core::IObject> clsParent(m, "_IDirtyID2"); py::class_<omni::graph::core::unstable::IDirtyID2, omni::core::Generated<omni::graph::core::unstable::IDirtyID2_abi>, omni::python::detail::PyObjectPtr<omni::graph::core::unstable::IDirtyID2>, omni::core::IObject> cls(m, "IDirtyID2"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::core::unstable::IDirtyID2>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::core::unstable::IDirtyID2>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::core::unstable::IDirtyID2 instantiation"); } return tmp; })); return omni::python::PyBind<omni::graph::core::unstable::IDirtyID2>::bind(cls); }
omniverse-code/kit/include/omni/graph/core/unstable/GenericNodeDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file GenericNodeDef.h
//!
//! @brief Declares @ref omni::graph::core::unstable::GenericNodeDef, the execution-framework
//! node definition that drives an OmniGraph node's compute.
#pragma once

#include <carb/profiler/Profile.h>

#include <omni/graph/core/IInternal.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/unstable/IGenericNodeDef.h>
#include <omni/graph/exec/unstable/NodeDef.h>
#include <omni/graph/exec/unstable/ExecutionTask.h>

namespace omni
{
namespace graph
{
namespace core
{
namespace unstable
{

//! @copydoc omni::graph::core::unstable::IGenericNodeDef
class GenericNodeDef : public exec::unstable::NodeDefT<IGenericNodeDef>
{
public:
    //! Construct OmniGraph node definition which will call the OGN compute method to execute the node
    //!
    //! @param definitionName Definition name is considered as a token that transformation passes can register against
    //! @param nodeObj Authoring node associated with this definition instance
    //!
    //! May throw.
    static omni::core::ObjectPtr<GenericNodeDef> create(const char* definitionName, const NodeObj& nodeObj)
    {
        OMNI_THROW_IF_ARG_NULL(definitionName);
        return omni::core::steal(new GenericNodeDef(definitionName, nodeObj));
    }

    //! Determine scheduling constraint based on the OmniGraph node object
    //!
    //! Reads the node type's scheduling hints: thread-safe or pure nodes run in
    //! parallel, USD-writing nodes are isolated, everything else runs serially.
    static exec::unstable::SchedulingInfo getSchedulingInfo(const NodeObj& nodeObj)
    {
        auto schedulingInfo = exec::unstable::SchedulingInfo::eSerial;
        NodeTypeObj nodeTypeObj = nodeObj.iNode->getNodeTypeObj(nodeObj);
        ISchedulingHints* const schedulingHints = nodeTypeObj.iNodeType->getSchedulingHints(nodeTypeObj);
        // NOTE(review): cast() returns an ObjectPtr whose reference is released
        // at the end of this full expression; the raw pointer kept here is
        // presumably backed by the node type's own reference -- confirm.
        ISchedulingHints2* const schedulingHints2 = omni::core::cast<ISchedulingHints2>(schedulingHints).get();
        if (schedulingHints)
        {
            if (schedulingHints->getThreadSafety() == eThreadSafety::eSafe ||
                (schedulingHints2 && schedulingHints2->getPurityStatus() == ePurityStatus::ePure))
            {
                // schedulingHints()->getDataAccess(eAccessLocation::eUsd) == eAccessType::eRead
                // depending on situation, above can be scheduled in parallel. In general it is safer
                // to assume serial, since USD composition engine is not thread safe...but once the stage
                // is composed, pulling out individual attribute values is safe.
                // Currently we will hurt performance for nodes like OgnReadPrimAttribute and make
                // our mainline branch be slower than prototype. We will explore runtime override
                // to scheduling hints that would allow changing the dispatch based on runtime information.
                // Also note that with the current ordering of the if-else statements, a node
                // can technically be both threadsafe AND write to usd, if the node's compute
                // implementation allows for it (e.g. with a delay write to USD that leverages
                // the OG "registerForUSDWriteBack").
                schedulingInfo = exec::unstable::SchedulingInfo::eParallel;
            }
            else if (schedulingHints->getDataAccess(eAccessLocation::eUsd) == eAccessType::eWrite)
            {
                schedulingInfo = exec::unstable::SchedulingInfo::eIsolate;
            }
        }
        return schedulingInfo;
    }

protected:
    using BaseType = exec::unstable::NodeDefT<IGenericNodeDef>;

    //! Execute the authoring node's compute for every instance covered by this task.
    //!
    //! Skips disabled nodes. Prefers the vectorized compute entry point when the
    //! node type provides one; otherwise loops over instances invoking the scalar
    //! compute once per instance. Always reports success to the executor.
    exec::unstable::Status execute_abi(exec::unstable::ExecutionTask* info) noexcept override
    {
        exec::unstable::ExecutionTask* currentTask = exec::unstable::getCurrentTask();
        if (!m_nodeObj.iNode->isDisabled(m_nodeObj) && currentTask)
        {
            // Instance bucketing for vectorized execution comes from the private def.
            InstanceIndex baseInstanceIndex{ 0 };
            size_t numberOfInstances{ 1 };
            m_privateDef->getInstanceInfo(*info, baseInstanceIndex, numberOfInstances);

            CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "NodeCompute");
            if (m_nodeTypeObj.iNodeType->computeVectorized)
            {
                // One call covers the whole instance range, starting at the base index.
                currentTask->setUserIndex(baseInstanceIndex.index);
                m_nodeTypeObj.iNodeType->computeVectorized(m_contextObj, m_nodeObj, numberOfInstances);
            }
            else
            {
                if (m_nodeTypeObj.iNodeType->compute == nullptr)
                {
                    // this can happen if a node is provided by an extension and the user unloads the extension
                    CARB_LOG_ERROR_ONCE("Node implementation lost, aborting graph execution");
                }
                else
                {
                    // Scalar fallback: advance the task's user index per instance so the
                    // compute callback knows which instance it is operating on.
                    InstanceIndex currentInstance = baseInstanceIndex;
                    for (size_t i = 0; i < numberOfInstances; ++i)
                    {
                        currentTask->setUserIndex(currentInstance.index);
                        m_nodeTypeObj.iNodeType->compute(m_contextObj, m_nodeObj);
                        ++currentInstance;
                    }
                }
            }
        }
        return exec::unstable::Status::eSuccess;
    }

    //! Return the scheduling constraint computed once at construction (task-independent).
    exec::unstable::SchedulingInfo getSchedulingInfo_abi(const exec::unstable::ExecutionTask* info) noexcept override
    {
        return m_schedulingInfo;
    }

    //! Return the authoring node backing this definition.
    NodeObj getAuthoringNode_abi() noexcept override
    {
        return m_nodeObj;
    }

    //! Private constructor; use create(). Caches the graph context and node type
    //! objects up front so execute_abi avoids per-run lookups.
    GenericNodeDef(const char* definitionName, const NodeObj& nodeObj)
        : BaseType(definitionName),
          m_nodeObj{ nodeObj },
          m_schedulingInfo(getSchedulingInfo(nodeObj)),
          m_privateDef(carb::getCachedInterface<omni::graph::core::IInternal>()->createPrivateNodeDef(nodeObj),
                       omni::core::kSteal)
    {
        GraphObj graphObj = nodeObj.iNode->getGraph(nodeObj);
        m_contextObj = graphObj.iGraph->getDefaultGraphContext(graphObj);
        m_nodeTypeObj = nodeObj.iNode->getNodeTypeObj(nodeObj);
    }

private:
    NodeObj m_nodeObj; //!< Authoring node this instance is defining for execution
    GraphContextObj m_contextObj; //!< Authoring graph context stored for speed of execution
    NodeTypeObj m_nodeTypeObj; //!< Authoring node type information stored for speed of execution
    exec::unstable::SchedulingInfo m_schedulingInfo; //!< Scheduling constraint
    omni::core::ObjectPtr<IPrivateNodeDef> m_privateDef; //!< Internal object, hiding implementation details
                                                         //!< relying on OG internals
};

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/unstable/IGenericNodeGraphDef.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

// NOTE: intentionally no include guard -- this header is included twice under
// the OMNI_BIND_INCLUDE_INTERFACE_{DECL,IMPL} two-pass protocol.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Node graph definition interface for OmniGraph graph.
//!
//! This interface allows us to reason about OmniGraph graphs during graph transformation phase and access authoring
//! graph associated with this definition.
template <>
class omni::core::Generated<omni::graph::core::unstable::IGenericNodeGraphDef_abi>
    : public omni::graph::core::unstable::IGenericNodeGraphDef_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::unstable::IGenericNodeGraphDef")

    //! Returns a handle to authoring graph associated with this definition.
    omni::graph::core::GraphObj getAuthoringGraph() noexcept;

    //! Returns True if this graph is used as part of instancing pipeline, false otherwise.
    //!
    //! @note We will most likely remove this before making this interface stable
    bool isInstanced() noexcept;

    //! Returns execution node associated with a given authoring node, or nullptr
    //! if no association was discovered in this definition.
    //!
    //! The returned @ref INode will *not* have @ref omni::core::IObject::acquire() called before being
    //! returned.
    omni::graph::exec::unstable::INode* getExecutionNode(const omni::graph::core::NodeObj& nodeObj) noexcept;

    //! Construct the graph.
    //!
    //! Construction can happen for entire execution graph or only affected by a topological change definitions.
    void build(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder) noexcept;

    //! Inspect the state of the graph
    bool inspect(omni::core::ObjectParam<inspect::IInspector> inspector) noexcept;

    //! Acquire internal information about bucketing of instances for vectorized execution
    //!
    //! Arguments must not be @c nullptr.
    void getInstanceInfo(const omni::graph::exec::unstable::ExecutionTask& info,
                         omni::graph::core::InstanceIndex& retBaseInstanceIndex,
                         size_t& retNumberOfInstances) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Thin forwarding wrappers from the public API surface to the raw ABI slots.
inline omni::graph::core::GraphObj omni::core::Generated<
    omni::graph::core::unstable::IGenericNodeGraphDef_abi>::getAuthoringGraph() noexcept
{
    return getAuthoringGraph_abi();
}

inline bool omni::core::Generated<omni::graph::core::unstable::IGenericNodeGraphDef_abi>::isInstanced() noexcept
{
    return isInstanced_abi();
}

inline omni::graph::exec::unstable::INode* omni::core::Generated<
    omni::graph::core::unstable::IGenericNodeGraphDef_abi>::getExecutionNode(const omni::graph::core::NodeObj& nodeObj) noexcept
{
    return getExecutionNode_abi(&nodeObj);
}

inline void omni::core::Generated<omni::graph::core::unstable::IGenericNodeGraphDef_abi>::build(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder) noexcept
{
    build_abi(builder.get());
}

inline bool omni::core::Generated<omni::graph::core::unstable::IGenericNodeGraphDef_abi>::inspect(
    omni::core::ObjectParam<inspect::IInspector> inspector) noexcept
{
    return inspect_abi(inspector.get());
}

inline void omni::core::Generated<omni::graph::core::unstable::IGenericNodeGraphDef_abi>::getInstanceInfo(
    const omni::graph::exec::unstable::ExecutionTask& info,
    omni::graph::core::InstanceIndex& retBaseInstanceIndex,
    size_t& retNumberOfInstances) noexcept
{
    getInstanceInfo_abi(&info, &retBaseInstanceIndex, &retNumberOfInstances);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/unstable/INodeTypeForwarding.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// This file contains the definition of the INodeTypeForwarding ONI class
// This ABI is unstable and subject to change

/*  _   _  _____  ______            _______  __     ______  _    _  _____   ______          ___   _  _____   _____  _____  _  __ */
/* | | | |/ ____| ____|    /\|__ __| \ \  / / __ \| | | | __ \ / __ \ \  / / \ | | | __ \|_ _|/ ____| |/ /                      */
/* | | | | (___ | |__     /  \  | |   \ \_/ / |  | | | | | |__) | |  | | \ \ /\ / /| \| | | |__) | | | | (___ | ' /             */
/* | | | |\___ \| __|    / /\ \ | |    \ / | |  | | | | |  _  /| |  | |\ \/  \/ / | . ` | |  _  / | |  \___ \|  <               */
/* | |__| |____) | |____ / ____ \| |    | | | |__| | |__| | \ \ | |__| | \ /\ /  | |\  | | | \ \ _| |_ ____) | . \              */
/*  \____/|_____/|______/_/    \_\_|    |_|  \____/ \____/|_|  \_\ \____/  \/  \/  |_| \_| |_|  \_\_____|_____/|_|\_\            */

#include <omni/core/Omni.h>
#include <omni/core/IObject.h>

namespace omni
{
namespace graph
{
namespace core
{
namespace unstable
{

OMNI_DECLARE_INTERFACE(INodeTypeForwarding);

/**
 * @brief Interface that creates a forward on a request for a node type to a different node type
 *
 * There are a couple of different common use cases for needing a forward:
 *  - Node type gets renamed
 *  - Node type moves from one extension to another
 *
 * The node type forward specifies the unique node type name so if extension omni.my.extension has a node whose type
 * is specified as "MyNode" then the forward must be from "omni.my.extension.MyNode".
 *
 * The forwarding is version-based as well, where the version is a minimum number required for forwarding, the usual
 * node version update mechanism not withstanding. For example, if you set up a forward from "omni.nodes.MyNode" version
 * 2 to "omni.my_nodes.MyNode" version 3 then any larger version number is forwarded to the same location:
 *  - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3)
 *  - omni.nodes.MyNode(3) -> omni.my_nodes.MyNode(3)
 *  - omni.nodes.MyNode(4) -> omni.my_nodes.MyNode(3)
 *
 * The forwards can also have multiple versions forwarding to different locations, so if on top of the above forward
 * you also add a forward from "omni.nodes.MyNode" version 3 to "omni.new_nodes.MyNode" version 4 then these become
 * the example forward locations:
 *  - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3)
 *  - omni.nodes.MyNode(3) -> omni.new_nodes.MyNode(4)
 *  - omni.nodes.MyNode(4) -> omni.new_nodes.MyNode(4)
 *
 * Version numbers lower than the first forward are left as-is
 *  - omni.nodes.MyNode(1) -> omni.nodes.MyNode(1)
 *
 * @note The usual mechanism of calling updateVersionNumber on a node is only applied after a forward so in the above
 * cases requesting omni.nodes.MyNode(2) does not call updateVersionNumber(1,2) on your omni.nodes.MyNode
 * implementation.
 *
 * Node type forwards are associative, so if A forwards to B and B forwards to C then when you request A you get C.
 * Adding a new forward from omni.my_nodes.MyNode(3) to omni.new_nodes.MyNode(2) above yields this forwarding:
 *  - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3) -> omni.new_nodes.MyNode(2)
 *  - omni.nodes.MyNode(3) -> omni.new_nodes.MyNode(4)
 *  - omni.nodes.MyNode(4) -> omni.new_nodes.MyNode(4)
 */
class INodeTypeForwarding_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.INodeTypeForwarding")>
{
protected:
    /**
     * @returns Number of currently defined forwards
     */
    virtual size_t getForwardCount_abi() noexcept = 0;

    /**
     * @brief Get the list of available forwards and their redirections.
     * Note that there is no guarantee that the replacement names are legitimate node type names, only that they
     * have been registered as replacements.
     *
     * The caller is responsible for allocating and destroying buffers large enough to hold "bufferSize" results.
     * If bufferSize > getForwardCount() then the entries at the ends of the buffers will be filled with nullptr.
     *
     * @param[out] forwardNameBuffer List of forward names to be replaced
     * @param[out] forwardVersionBuffer List of the first version of each forward name to be replaced
     * @param[out] replacementNameBuffer List of node type names that replace the forwarded names
     * @param[out] replacementVersionBuffer List of node type versions that replace the forwarded names
     * @param[out] extensionIdBuffer List of extension IDs corresponding to the replacement node type names.
     * @param[in] bufferSize Number of entries to fill in the buffers
     * @return true if the buffers were successfully filled and the bufferSize matched the forward count
     */
    OMNI_ATTR("no_py")
    virtual bool getForwards_abi(
        OMNI_ATTR("*c_str, out, not_null, count=bufferSize") char const** forwardNameBuffer,
        OMNI_ATTR("out, not_null, count=bufferSize") int* forwardVersionBuffer,
        OMNI_ATTR("*c_str, out, not_null, count=bufferSize") char const** replacementNameBuffer,
        OMNI_ATTR("out, not_null, count=bufferSize") int* replacementVersionBuffer,
        OMNI_ATTR("*c_str, out, not_null, count=bufferSize") char const** extensionIdBuffer,
        size_t bufferSize
    ) noexcept = 0;

    /**
     * @brief Define a new node type forward.
     * It is allowed for the same forwardName to be defined more than once, however the "forwardVersion" must be
     * different from any existing ones. Later "forwardVersion" numbers will supersede earlier ones in this case.
     * For example if you have these two forwards set up:
     *     OldNode,1 -> BetterNode,1,omni.better.extension
     *     OldNode,2 -> MuchBetterNode,1,omni.much_better.extension
     * then when version 1 of "OldNode" is requested it will treat it as if you requested "BetterNode", but when
     * versions 2 or later are requested it will instead treat it as if you requested "MuchBetterNode". These can be
     * chained together:
     *     OldNode,1 -> BetterNode,1,omni.better.extension
     *     BetterNode,1 -> MuchBetterNode,1,omni.much_better.extension
     *
     * @param[in] forwardName Name to be replaced
     * @param[in] forwardVersion The first version of the forward name to be replaced
     * @param[in] replacementName Node type name that replaces the forwarded name
     * @param[in] replacementVersion Version of the node type that replaces the forwarded name
     * @param[in] replacementExtensionId Extension ID in which the replacement node type can be found
     * @return false if there was already a forward with the given name and initial version number
     */
    virtual bool defineForward_abi(
        OMNI_ATTR("c_str, in, not_null") char const* forwardName,
        int forwardVersion,
        OMNI_ATTR("c_str, in, not_null") char const* replacementName,
        int replacementVersion,
        OMNI_ATTR("c_str, in, not_null") char const* replacementExtensionId
    ) noexcept = 0;

    /**
     * @brief Remove an existing node type forward.
     * Since a forwardName + forwardVersion combination is unique there is no need to pass in the replacement
     * information. Only the forward with the matching version is removed. Any others with the same name remain
     * untouched.
     *
     * @param[in] forwardName Forward to be removed
     * @param[in] forwardVersion The version at which the forward is to be removed
     * @return presumably false if no forward with the given name and version existed
     *         NOTE(review): the original return description appeared to be copy-pasted from defineForward_abi;
     *         confirm the actual contract against the implementation.
     */
    virtual bool removeForward_abi(
        OMNI_ATTR("c_str, in, not_null") char const* forwardName,
        int forwardVersion
    ) noexcept = 0;

    /**
     * @brief Remove forwards referencing a given node type name.
     *
     * @param[in] referencedName Node type name referenced by the forwards to be removed
     * @param[in] referencedVersion Version of the referenced node type
     * @return number of forwards to the given type that were removed
     */
    virtual size_t removeForwardedType_abi(
        OMNI_ATTR("c_str, in, not_null") char const* referencedName,
        int referencedVersion
    ) noexcept = 0;

    /**
     * @brief Find a node type name replacement corresponding to the given node type forward name and version
     *
     * @param[in] forwardName Name of the node type forward to look up
     * @param[in] forwardVersion Version number of the node type forward being looked up
     * @param[out] nodeTypeName Pointer to the name of the actual node type name corresponding to the forward,
     *                          or nullptr if there is no equivalent. This string is a constant and managed
     *                          by the node type forward system so it should not be stored on a long-term basis.
     * @param[out] nodeTypeVersion Pointer to the version of the actual node type name corresponding to the forward
     * @param[out] nodeTypeExtension Pointer to the name of the extension owning nodeTypeName
     * @return true if the forwardName/forwardVersion found a valid mapping, with the result placed in the return parameters
     */
    OMNI_ATTR("no_py")
    virtual bool findForward_abi(
        OMNI_ATTR("c_str, in, not_null") char const* forwardName,
        int forwardVersion,
        OMNI_ATTR("*c_str, out, not_null, count=1") char const** nodeTypeName,
        OMNI_ATTR("out, count=1") int* nodeTypeVersion,
        OMNI_ATTR("*c_str, out, not_null, count=1") char const** nodeTypeExtension
    ) noexcept = 0;
};

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni

#include "INodeTypeForwarding.gen.h"
omniverse-code/kit/include/omni/graph/core/unstable/IGenericNodeGraphDef.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGenericNodeGraphDef.h
//!
//! @brief Defines @ref omni::graph::core::unstable::IGenericNodeGraphDef.
#pragma once

#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/core/Handle.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/inspect/IInspector.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class INode;
class IGraphBuilder;
} // namespace unstable
} // namespace exec

namespace core
{
namespace unstable
{

// forward declarations needed by interface declaration
class IGenericNodeGraphDef;
class IGenericNodeGraphDef_abi;

//! Node graph definition interface for OmniGraph graph.
//!
//! This interface allows us to reason about OmniGraph graphs during graph transformation phase and access authoring
//! graph associated with this definition.
class IGenericNodeGraphDef_abi
    : public omni::core::Inherits<exec::unstable::INodeGraphDef, OMNI_TYPE_ID("omni.graph.core.unstable.IGenericNodeGraphDef")>
{
protected:
    //! Returns a handle to authoring graph associated with this definition.
    virtual GraphObj getAuthoringGraph_abi() noexcept = 0;

    //! Returns True if this graph is used as part of instancing pipeline, false otherwise.
    //!
    //! @note We will most likely remove this before making this interface stable
    virtual bool isInstanced_abi() noexcept = 0;

    //! Returns execution node associated with a given authoring node, or nullptr
    //! if no association was discovered in this definition.
    //!
    //! The returned @ref INode will *not* have @ref omni::core::IObject::acquire() called before being
    //! returned.
    virtual OMNI_ATTR("no_acquire") omni::graph::exec::unstable::INode* getExecutionNode_abi(
        OMNI_ATTR("in, not_null, throw_if_null, ref") const NodeObj* nodeObj) noexcept = 0;

    //! Construct the graph.
    //!
    //! Construction can happen for entire execution graph or only affected by a topological change definitions.
    virtual void build_abi(omni::graph::exec::unstable::IGraphBuilder* builder) noexcept = 0;

    //! Inspect the state of the graph
    virtual bool inspect_abi(OMNI_ATTR("not_null") inspect::IInspector* inspector) noexcept = 0;

    //! Acquire internal information about bucketing of instances for vectorized execution
    //!
    //! Arguments must not be @c nullptr.
    virtual void getInstanceInfo_abi(OMNI_ATTR("in, not_null, throw_if_null, ref")
                                         omni::graph::exec::unstable::ExecutionTask const* info,
                                     OMNI_ATTR("out, not_null, ref") omni::graph::core::InstanceIndex* retBaseInstanceIndex,
                                     OMNI_ATTR("out, not_null, ref") size_t* retNumberOfInstances) noexcept = 0;
};

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/core/unstable/IGenericNodeGraphDef.gen.h>

// custom API declaration

//! @copydoc omni::graph::core::unstable::IGenericNodeGraphDef_abi
class omni::graph::core::unstable::IGenericNodeGraphDef
    : public omni::core::Generated<omni::graph::core::unstable::IGenericNodeGraphDef_abi>
{
};

// additional headers needed for API implementation
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/ExecutorFactory.h>

// custom API implementation

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/core/unstable/IGenericNodeGraphDef.gen.h>
omniverse-code/kit/include/omni/graph/core/unstable/PyINodeTypeForwarding.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

#pragma once

#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>

#include <sstream>

//! Registers the Python bindings for omni::graph::core::unstable::INodeTypeForwarding on module @p m.
//! Returns the result of omni::python::PyBind<INodeTypeForwarding>::bind(cls).
auto bindINodeTypeForwarding(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    // First bind the Generated<> intermediate base under a private name so the
    // concrete interface class below can list it as its pybind11 base.
    py::class_<omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>>,
               omni::core::IObject>
        clsParent(m, "_INodeTypeForwarding");

    py::class_<omni::graph::core::unstable::INodeTypeForwarding,
               omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::core::unstable::INodeTypeForwarding>, omni::core::IObject>
        cls(m, "INodeTypeForwarding",
            R"OMNI_BIND_RAW_(@brief Interface that creates a forward on a request for a node type to a different node type There are a couple of different common use cases for needing a forward: - Node type gets renamed - Node type moves from one extension to another The node type forward specifies the unique node type name so if extension omni.my.extension has a node whose type is specified as "MyNode" then the forward must be from "omni.my.extension.MyNode". The forwarding is version-based as well, where the version is a minimum number required for forwarding, the usual node version update mechanism not withstanding. For example, if you set up a forward from "omni.nodes.MyNode" version 2 to "omni.my_nodes.MyNode" version 3 then any larger version number is forwarded to the same location: - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3) - omni.nodes.MyNode(3) -> omni.my_nodes.MyNode(3) - omni.nodes.MyNode(4) -> omni.my_nodes.MyNode(3) The forwards can also have multiple versions forwarding to different locations, so if on top of the above forward you also add a forward from "omni.nodes.MyNode" version 3 to "omni.new_nodes.MyNode" version 4 then these become the example forward locations: - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3) - omni.nodes.MyNode(3) -> omni.new_nodes.MyNode(4) - omni.nodes.MyNode(4) -> omni.new_nodes.MyNode(4) Version numbers lower than the first forward are left as-is - omni.nodes.MyNode(1) -> omni.nodes.MyNode(1) @note The usual mechanism of calling updateVersionNumber on a node is only applied after a forward so in the above cases requesting omni.nodes.MyNode(2) does not call updateVersionNumber(1,2) on your omni.nodes.MyNode implementation. Node type forwards are associative, so if A forwards to B and B forwards to C then when you request A you get C. Adding a new forward from omni.my_nodes.MyNode(3) to omni.new_nodes.MyNode(2) above yields this forwarding: - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3) -> omni.new_nodes.MyNode(2) - omni.nodes.MyNode(3) -> omni.new_nodes.MyNode(4) - omni.nodes.MyNode(4) -> omni.new_nodes.MyNode(4))OMNI_BIND_RAW_");

    // __init__ overload 1: downcast an existing IObject to INodeTypeForwarding; raises on failure.
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::unstable::INodeTypeForwarding>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));

    // __init__ overload 2: create a fresh instance via the type factory; raises on failure.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::unstable::INodeTypeForwarding>();
            if (!tmp)
            {
                throw std::runtime_error(
                    "unable to create omni::graph::core::unstable::INodeTypeForwarding instantiation");
            }
            return tmp;
        }));

    // Read-only property: number of currently defined forwards.
    cls.def_property_readonly("forward_count", &omni::graph::core::unstable::INodeTypeForwarding::getForwardCount);

    cls.def("define_forward",
            [](omni::graph::core::unstable::INodeTypeForwarding* self, const char* forwardName, int forwardVersion,
               const char* replacementName, int replacementVersion, const char* replacementExtensionId)
            {
                auto return_value = self->defineForward(
                    forwardName, forwardVersion, replacementName, replacementVersion, replacementExtensionId);
                return return_value;
            },
            R"OMNI_BIND_RAW_(@brief Define a new node type forward. It is allowed to have the same forwardName to be defined more than once, however the "forwardVersion" must be different from any existing ones. Later "forwardVersion" numbers will supersede earlier ones in this case. For example if you have these two forwards set up: OldNode,1 -> BetterNode,1,omni.better.extension OldNode,2 -> MuchBetterNode,1,omni.much_better.extension then when version 1 of "OldNode" is requested it will treat it as if you requested "BetterNode", but when versions 2 or later are requested it will instead treat it as if you requested "MuchBetterNode". These can be chained together: OldNode,1 -> BetterNode,1,omni.better.extension BetterNode,1 -> MuchBetterNode,1,omni.much_better.extension @param[in] forwardName Name to be replaced @param[in] forwardVersion The first version of the forward name to be replaced @param[in] replacementName Node type name that replaces the forwarded name @param[in] replacementVersion Version of the node type that replaces the forwarded name @param[in] extensionId Extension ID in which the replacement node type can be found @return false if there was already an forward with the given name and initial version number)OMNI_BIND_RAW_",
            py::arg("forward_name"), py::arg("forward_version"), py::arg("replacement_name"),
            py::arg("replacement_version"), py::arg("replacement_extension_id"));

    cls.def("remove_forward",
            [](omni::graph::core::unstable::INodeTypeForwarding* self, const char* forwardName, int forwardVersion)
            {
                auto return_value = self->removeForward(forwardName, forwardVersion);
                return return_value;
            },
            R"OMNI_BIND_RAW_(@brief Remove an existing node type forward. Since an forwardName + forwardVersion combination is unique there is no need to pass in the replacement information. Only the forward with the matching version is removed. Any others with the same name remain untouched. @param[in] forwardName Forward to be removed @param[in] forwardVersion The version at which the forward is to be removed @return false if there was already an forward with the given name and initial version number)OMNI_BIND_RAW_",
            py::arg("forward_name"), py::arg("forward_version"));

    cls.def("remove_forwarded_type",
            [](omni::graph::core::unstable::INodeTypeForwarding* self, const char* referencedName, int referencedVersion)
            {
                auto return_value = self->removeForwardedType(referencedName, referencedVersion);
                return return_value;
            },
            R"OMNI_BIND_RAW_(@brief Remove forwards referencing a given node type name. @param[in] referencedName Forward to be removed @param[in] referencedVersion The version at which the forward is to be removed @return number of forwards to the given type that were removed)OMNI_BIND_RAW_",
            py::arg("referenced_name"), py::arg("referenced_version"));

    // Finalize and return the bound class wrapper.
    return omni::python::PyBind<omni::graph::core::unstable::INodeTypeForwarding>::bind(cls);
}
omniverse-code/kit/include/omni/graph/core/unstable/IPrivateNodeGraphDef.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

// NOTE: This header is included twice by its parent interface header: once with
// OMNI_BIND_INCLUDE_INTERFACE_DECL defined (emitting the class declaration) and once
// with OMNI_BIND_INCLUDE_INTERFACE_IMPL defined (emitting the inline forwarder bodies).
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! This is a private interface. Rather than directly using this private interface,
//! access the functionality this interface provides by subclassing GenericGraphDef
template <>
class omni::core::Generated<omni::graph::core::unstable::IPrivateNodeGraphDef_abi>
    : public omni::graph::core::unstable::IPrivateNodeGraphDef_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::unstable::IPrivateNodeGraphDef")

    //! Returns a handle to authoring graph associated with this definition.
    omni::graph::core::GraphObj getAuthoringGraph() noexcept;

    //! Returns True if this graph is used as part of instancing pipeline, false otherwise.
    //!
    //! @note We will most likely remove this before making this interface stable
    bool isInstanced() noexcept;

    //! Internal binding to receive invalidation messages.
    //!
    //! @note Once OG will provide topology changed notifications we can register to, this should go away.
    void attachToAuthoring(omni::core::ObjectParam<omni::graph::exec::unstable::IDef> definition) noexcept;

    //! Pre-execution call can be used to setup the graph state prior to execution or skip entirely the execution.
    //!
    //! The given task must not be @c nullptr.
    omni::graph::exec::unstable::Status preExecute(omni::graph::exec::unstable::ExecutionTask& info) noexcept;

    //! Post-execution call can be used to finalize the execution, e.g. transfer computation results to consumers.
    //!
    //! The given task must not be @c nullptr.
    omni::graph::exec::unstable::Status postExecute(omni::graph::exec::unstable::ExecutionTask& info) noexcept;

    //! Acquire internal information about bucketing of instances for vectorized execution
    //!
    //! Arguments must not be @c nullptr.
    void getInstanceInfo(const omni::graph::exec::unstable::ExecutionTask& info,
                         omni::graph::core::InstanceIndex& retBaseInstanceIndex,
                         size_t& retNumberOfInstances) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline forwarders: each public reference-based method delegates to the matching
// pointer-based *_abi virtual on the underlying implementation.

inline omni::graph::core::GraphObj omni::core::Generated<
    omni::graph::core::unstable::IPrivateNodeGraphDef_abi>::getAuthoringGraph() noexcept
{
    return getAuthoringGraph_abi();
}

inline bool omni::core::Generated<omni::graph::core::unstable::IPrivateNodeGraphDef_abi>::isInstanced() noexcept
{
    return isInstanced_abi();
}

inline void omni::core::Generated<omni::graph::core::unstable::IPrivateNodeGraphDef_abi>::attachToAuthoring(
    omni::core::ObjectParam<omni::graph::exec::unstable::IDef> definition) noexcept
{
    attachToAuthoring_abi(definition.get());
}

inline omni::graph::exec::unstable::Status
omni::core::Generated<omni::graph::core::unstable::IPrivateNodeGraphDef_abi>::preExecute(
    omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
    return preExecute_abi(&info);
}

inline omni::graph::exec::unstable::Status
omni::core::Generated<omni::graph::core::unstable::IPrivateNodeGraphDef_abi>::postExecute(
    omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
    return postExecute_abi(&info);
}

inline void omni::core::Generated<omni::graph::core::unstable::IPrivateNodeGraphDef_abi>::getInstanceInfo(
    const omni::graph::exec::unstable::ExecutionTask& info,
    omni::graph::core::InstanceIndex& retBaseInstanceIndex,
    size_t& retNumberOfInstances) noexcept
{
    getInstanceInfo_abi(&info, &retBaseInstanceIndex, &retNumberOfInstances);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/unstable/GenericNodeGraphDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file GenericNodeGraphDef.h
//!
//! @brief Declares @ref omni::graph::core::unstable::GenericNodeGraphDef.
#pragma once

#include <carb/profiler/Profile.h>
#include <omni/String.h>
#include <omni/graph/core/IInternal.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/unstable/IGenericNodeGraphDef.h>
#include <omni/graph/exec/unstable/NodeGraphDef.h>
#include <omni/inspect/IInspectJsonSerializer.h>

namespace omni
{
namespace graph
{
namespace core
{
namespace unstable
{

//! @copydoc omni::graph::core::unstable::IGenericNodeGraphDef
//!
//! Concrete node-graph definition that bridges the execution framework to an authoring
//! OmniGraph by delegating to an internal IPrivateNodeGraphDef created via IInternal.
class GenericNodeGraphDef
    : public exec::unstable::NodeGraphDefT<IGenericNodeGraphDef, exec::unstable::INodeGraphDefDebug>
{
public:
    //! Construct generic OmniGraph definition.
    //!
    //! This class is a base class for all OmniGraph node graph definitions and will use provided executor.
    //!
    //! @param builder Builder constructing the graph
    //! @param executorFactory Executor factory to use for this instance of the graph
    //! @param definitionName Definition name is considered as a token that transformation passes can register against
    //! @param graphObj Authoring graph associated with this definition
    //! @param isInstanced Is this an instanced OmniGraph
    //!
    //! May throw.
    static omni::core::ObjectPtr<GenericNodeGraphDef> create(exec::unstable::IGraphBuilder* builder,
                                                             exec::unstable::ExecutorFactory executorFactory,
                                                             const char* definitionName,
                                                             const GraphObj& graphObj,
                                                             bool isInstanced)
    {
        OMNI_THROW_IF_ARG_NULL(builder);
        OMNI_THROW_IF_ARG_NULL(definitionName);
        return omni::core::steal(
            new GenericNodeGraphDef(builder, executorFactory, definitionName, graphObj, isInstanced));
    }

    //! Construct the graph: validate topology, then register this definition with the authoring graph.
    void build_abi(exec::unstable::IGraphBuilder* builder) noexcept override
    {
        // make sure the topology of the graph is valid...this may not be the case if graph is empty
        // because it happens lazily when requesting root node
        getRoot()->validateOrResetTopology();

        // attach to authoring graph
        m_privateDef->attachToAuthoring(this);
    }

    //! Inspect the state of the graph.
    //! No graph-specific state is emitted here; reports success unconditionally.
    bool inspect_abi(inspect::IInspector* inspector) noexcept override
    {
        return true;
    }

    //! Builds the canonical definition name for @p graphObj: "og.def.graph_" + evaluator name.
    static omni::string getDefinitionName(const GraphObj& graphObj)
    {
        static const omni::string sDefNamePrefix = "og.def.graph_";
        return sDefNamePrefix + graphObj.iGraph->getEvaluatorName(graphObj);
    }

protected:
    //! Convenience alias for the templated base class.
    using BaseType = exec::unstable::NodeGraphDefT<IGenericNodeGraphDef, exec::unstable::INodeGraphDefDebug>;

    //! Returns the authoring graph handle, delegating to the internal private definition.
    GraphObj getAuthoringGraph_abi() noexcept override
    {
        return m_privateDef->getAuthoringGraph();
    }

    //! Returns whether this graph participates in the instancing pipeline (delegated).
    bool isInstanced_abi() noexcept override
    {
        return m_privateDef->isInstanced();
    }

    //! No authoring-node-to-execution-node association is tracked by this definition; always nullptr.
    exec::unstable::INode* getExecutionNode_abi(const NodeObj* nodeObj) noexcept override
    {
        return nullptr;
    }

    //! Delegates pre-execution setup to the internal private definition.
    exec::unstable::Status preExecute_abi(exec::unstable::ExecutionTask* info) noexcept override
    {
        return m_privateDef->preExecute(*info);
    }

    //! Delegates post-execution finalization to the internal private definition.
    exec::unstable::Status postExecute_abi(exec::unstable::ExecutionTask* info) noexcept override
    {
        return m_privateDef->postExecute(*info);
    }

    //! Delegates instance-bucketing queries to the internal private definition.
    void getInstanceInfo_abi(omni::graph::exec::unstable::ExecutionTask const* info,
                             InstanceIndex* retBaseInstanceIndex,
                             size_t* retNumberOfInstances) noexcept override
    {
        m_privateDef->getInstanceInfo(*info, *retBaseInstanceIndex, *retNumberOfInstances);
    }

    //! Protected constructor; use create(). Steals ownership of the private definition
    //! produced by the cached IInternal interface.
    GenericNodeGraphDef(exec::unstable::IGraphBuilder* builder,
                        exec::unstable::ExecutorFactory executorFactory,
                        const char* definitionName,
                        const GraphObj& graphObj,
                        bool isInstanced)
        : BaseType(builder->getGraph(), executorFactory, definitionName) // may throw
        , m_privateDef(
              carb::getCachedInterface<omni::graph::core::IInternal>()->createPrivateGraphDef(graphObj, isInstanced),
              omni::core::kSteal)
    {
    }

private:
    omni::core::ObjectPtr<IPrivateNodeGraphDef> m_privateDef; //!< Internal object, hiding implementation details
                                                              //!< relying on OG internals
};

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/unstable/INodeTypeForwarding.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

// NOTE: This header is included twice by its parent interface header: once for the
// class declaration (INTERFACE_DECL) and once for the inline forwarders (INTERFACE_IMPL).
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

/**
 * @brief Interface that creates a forward on a request for a node type to a different node type
 *
 * There are a couple of different common use cases for needing a forward:
 * - Node type gets renamed
 * - Node type moves from one extension to another
 *
 * The node type forward specifies the unique node type name so if extension omni.my.extension has a node whose type
 * is specified as "MyNode" then the forward must be from "omni.my.extension.MyNode".
 *
 * The forwarding is version-based as well, where the version is a minimum number required for forwarding, the usual
 * node version update mechanism not withstanding. For example, if you set up a forward from "omni.nodes.MyNode" version
 * 2 to "omni.my_nodes.MyNode" version 3 then any larger version number is forwarded to the same location:
 * - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3)
 * - omni.nodes.MyNode(3) -> omni.my_nodes.MyNode(3)
 * - omni.nodes.MyNode(4) -> omni.my_nodes.MyNode(3)
 *
 * The forwards can also have multiple versions forwarding to different locations, so if on top of the above forward
 * you also add a forward from "omni.nodes.MyNode" version 3 to "omni.new_nodes.MyNode" version 4 then these become
 * the example forward locations:
 * - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3)
 * - omni.nodes.MyNode(3) -> omni.new_nodes.MyNode(4)
 * - omni.nodes.MyNode(4) -> omni.new_nodes.MyNode(4)
 *
 * Version numbers lower than the first forward are left as-is
 * - omni.nodes.MyNode(1) -> omni.nodes.MyNode(1)
 *
 * @note The usual mechanism of calling updateVersionNumber on a node is only applied after a forward so in the above
 * cases requesting omni.nodes.MyNode(2) does not call updateVersionNumber(1,2) on your omni.nodes.MyNode
 * implementation.
 *
 * Node type forwards are associative, so if A forwards to B and B forwards to C then when you request A you get C.
 * Adding a new forward from omni.my_nodes.MyNode(3) to omni.new_nodes.MyNode(2) above yields this forwarding:
 * - omni.nodes.MyNode(2) -> omni.my_nodes.MyNode(3) -> omni.new_nodes.MyNode(2)
 * - omni.nodes.MyNode(3) -> omni.new_nodes.MyNode(4)
 * - omni.nodes.MyNode(4) -> omni.new_nodes.MyNode(4)
 */
template <>
class omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>
    : public omni::graph::core::unstable::INodeTypeForwarding_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::unstable::INodeTypeForwarding")

    /**
     * @returns Number of currently defined forwards
     */
    size_t getForwardCount() noexcept;

    /**
     * @brief Get the list of available forwards and their redirections.
     * Note that there is no guarantee that the replacement names are legitimate node type names, only that they
     * have been registered as replacements.
     *
     * The caller is responsible for allocating and destroying buffers large enough to hold "bufferSize" results.
     * If bufferSize > getForwardCount() then the entries at the ends of the buffers will be filled with nullptr.
     *
     * @param[out] forwardNameBuffer List of forward names to be replaced
     * @param[out] forwardVersionBuffer List of the first version of each forward name to be replaced
     * @param[out] replacementNameBuffer List of node type names that replace the forwarded names
     * @param[out] replacementVersionBuffer List of node type versions that replace the forwarded names
     * @param[out] extensionIdBuffer List of extension IDs corresponding to the replacement node type names.
     * @param[in] bufferSize Number of entries to fill in the buffers
     * @return true if the buffers were successfully filled and the bufferSize matched the forward count
     */
    bool getForwards(const char** forwardNameBuffer,
                     int* forwardVersionBuffer,
                     const char** replacementNameBuffer,
                     int* replacementVersionBuffer,
                     const char** extensionIdBuffer,
                     size_t bufferSize) noexcept;

    /**
     * @brief Define a new node type forward.
     * It is allowed to have the same forwardName to be defined more than once, however the "forwardVersion" must be
     * different from any existing ones. Later "forwardVersion" numbers will supersede earlier ones in this case.
     * For example if you have these two forwards set up:
     *     OldNode,1 -> BetterNode,1,omni.better.extension
     *     OldNode,2 -> MuchBetterNode,1,omni.much_better.extension
     * then when version 1 of "OldNode" is requested it will treat it as if you requested "BetterNode", but when
     * versions 2 or later are requested it will instead treat it as if you requested "MuchBetterNode". These can be
     * chained together:
     *     OldNode,1 -> BetterNode,1,omni.better.extension
     *     BetterNode,1 -> MuchBetterNode,1,omni.much_better.extension
     *
     * @param[in] forwardName Name to be replaced
     * @param[in] forwardVersion The first version of the forward name to be replaced
     * @param[in] replacementName Node type name that replaces the forwarded name
     * @param[in] replacementVersion Version of the node type that replaces the forwarded name
     * @param[in] extensionId Extension ID in which the replacement node type can be found
     * @return false if there was already an forward with the given name and initial version number
     */
    bool defineForward(const char* forwardName,
                       int forwardVersion,
                       const char* replacementName,
                       int replacementVersion,
                       const char* replacementExtensionId) noexcept;

    /**
     * @brief Remove an existing node type forward.
     * Since an forwardName + forwardVersion combination is unique there is no need to pass in the replacement
     * information. Only the forward with the matching version is removed. Any others with the same name remain
     * untouched.
     *
     * @param[in] forwardName Forward to be removed
     * @param[in] forwardVersion The version at which the forward is to be removed
     * @return false if there was already an forward with the given name and initial version number
     */
    bool removeForward(const char* forwardName, int forwardVersion) noexcept;

    /**
     * @brief Remove forwards referencing a given node type name.
     *
     * @param[in] referencedName Forward to be removed
     * @param[in] referencedVersion The version at which the forward is to be removed
     * @return number of forwards to the given type that were removed
     */
    size_t removeForwardedType(const char* referencedName, int referencedVersion) noexcept;

    /**
     * @brief Find a node type name replacement corresponding to the given node type forward name and version
     *
     * @param[in] forwardName Name of the node type forward to look up
     * @param[in] forwardVersion Version number of the node type forward being looked up
     * @param[out] nodeTypeName Pointer to the name of the actual node type name corresponding to the forward,
     *                          or nullptr if there is no equivalent. This string is a constant and managed
     *                          by the node type forward system so it should not be stored on a long-term basis.
     * @param[out] nodeTypeVersion Pointer to the version of the actual node type name corresponding to the forward
     * @param[out] nodeTypeExtension Pointer to the name of the extension owning nodeTypeName
     * @return true if the forwardName/forwardVersion found a valid mapping, with the result placed in the return
     *         parameters
     */
    bool findForward(const char* forwardName,
                     int forwardVersion,
                     const char** nodeTypeName,
                     int* nodeTypeVersion,
                     const char** nodeTypeExtension) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline forwarders: each public method delegates directly to the matching *_abi virtual.

inline size_t omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>::getForwardCount() noexcept
{
    return getForwardCount_abi();
}

inline bool omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>::getForwards(
    const char** forwardNameBuffer,
    int* forwardVersionBuffer,
    const char** replacementNameBuffer,
    int* replacementVersionBuffer,
    const char** extensionIdBuffer,
    size_t bufferSize) noexcept
{
    return getForwards_abi(forwardNameBuffer, forwardVersionBuffer, replacementNameBuffer, replacementVersionBuffer,
                           extensionIdBuffer, bufferSize);
}

inline bool omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>::defineForward(
    const char* forwardName,
    int forwardVersion,
    const char* replacementName,
    int replacementVersion,
    const char* replacementExtensionId) noexcept
{
    return defineForward_abi(forwardName, forwardVersion, replacementName, replacementVersion, replacementExtensionId);
}

inline bool omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>::removeForward(
    const char* forwardName, int forwardVersion) noexcept
{
    return removeForward_abi(forwardName, forwardVersion);
}

inline size_t omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>::removeForwardedType(
    const char* referencedName, int referencedVersion) noexcept
{
    return removeForwardedType_abi(referencedName, referencedVersion);
}

inline bool omni::core::Generated<omni::graph::core::unstable::INodeTypeForwarding_abi>::findForward(
    const char* forwardName,
    int forwardVersion,
    const char** nodeTypeName,
    int* nodeTypeVersion,
    const char** nodeTypeExtension) noexcept
{
    return findForward_abi(forwardName, forwardVersion, nodeTypeName, nodeTypeVersion, nodeTypeExtension);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/unstable/IGenericNodeDef.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Node definition interface for OmniGraph nodes. //! //! This interface allows us to reason about OmniGraph nodes during graph transformation phase and access authoring //! node associated with this definition. //! //! Execution of this definition will callback to OmniGraph node compute method. template <> class omni::core::Generated<omni::graph::core::unstable::IGenericNodeDef_abi> : public omni::graph::core::unstable::IGenericNodeDef_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::unstable::IGenericNodeDef") //! Returns a handle to authoring node associated with this definition. omni::graph::core::NodeObj getAuthoringNode() noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline omni::graph::core::NodeObj omni::core::Generated<omni::graph::core::unstable::IGenericNodeDef_abi>::getAuthoringNode() noexcept { return getAuthoringNode_abi(); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/unstable/IPrivateNodeDef.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! This is a private interface. Rather than directly using this private interface, //! access the functionality this interface provides by subclassing GenericNodeDef template <> class omni::core::Generated<omni::graph::core::unstable::IPrivateNodeDef_abi> : public omni::graph::core::unstable::IPrivateNodeDef_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::unstable::IPrivateNodeDef") //! Acquire internal information about bucketing of instances for vectorized execution //! //! Arguments must not be @c nullptr. void getInstanceInfo(omni::graph::exec::unstable::ExecutionTask& info, omni::graph::core::InstanceIndex& retBaseInstanceIndex, size_t& retNumberOfInstances) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline void omni::core::Generated<omni::graph::core::unstable::IPrivateNodeDef_abi>::getInstanceInfo( omni::graph::exec::unstable::ExecutionTask& info, omni::graph::core::InstanceIndex& retBaseInstanceIndex, size_t& retNumberOfInstances) noexcept { getInstanceInfo_abi(&info, &retBaseInstanceIndex, &retNumberOfInstances); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/unstable/IAttributeTemplate.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//
// This ABI is unstable and subject to change
//
// ============================== UNSTABLE ==============================
// Everything declared in this header may change without notice.
// ======================================================================

#pragma once

#include <omni/graph/core/iComputeGraph.h>

namespace omni
{
namespace graph
{
namespace core
{
namespace unstable
{

// ======================================================================
// Support for attribute templates types

//! Opaque handle identifying an attribute template managed by OmniGraph.
class AttributeTemplateHandle : public HandleBase<HandleInt, AttributeTemplateHandle>
{
public:
    using BaseHandleType = HandleInt;
    using HandleBase<HandleInt, AttributeTemplateHandle>::HandleBase;

    //! The reserved handle value representing "no attribute template".
    static constexpr HandleInt invalidValue()
    {
        return 0;
    }
};

struct IAttributeTemplate;

//! Pairing of the IAttributeTemplate interface with the opaque handle it operates on.
//! All IAttributeTemplate ABI functions take one of these as their first argument.
struct AttributeTemplateObj
{
    const IAttributeTemplate* iAttributeTemplate;    //!< interface providing functionality for the template
    AttributeTemplateHandle attributeTemplateHandle; //!< opaque handle to the underlying attribute template

    //! True when the handle refers to a valid attribute template.
    bool isValid() const
    {
        return attributeTemplateHandle.isValid();
    }
};

/**
 * Structure with information about the default data stored
 * in an attribute template
 */
struct AttributeTemplateDefaultData
{
    // Pointer to the data.
    const uint8_t* dataPtr;

    // The size of a single item in the array. For tuples, this represents the size of the tuple.
    size_t itemSizeInBytes;

    // The number of items stored. For scalars and tuples this is always 1, otherwise it is the number of elements
    // in the array.
    size_t elementCount;
};

/**
 * Carb interface representing an attribute template. An AttributeTemplate is the definition used
 * to create an attribute on a Node when created from a NodeType.
 *
 */
struct IAttributeTemplate
{
    CARB_PLUGIN_INTERFACE("omni::graph::core::unstable::IAttributeTemplate", 0, 2);

    /**
     * Returns the name of the attribute template, which includes any namespace prefixes prepended
     * by the NodeType.
     *
     * @param[in] attributeObj The attribute template to inspect
     * @returns The name of the attribute template, owned by the template (do not free).
     */
    const char*(CARB_ABI* getName)(const AttributeTemplateObj& attributeObj);

    /**
     * Returns a string of the typename that will be used when creating the attribute.
     *
     * For extended attribute types, this will return "token".
     * @see IAttribute::getTypeName
     * @param[in] attributeObj The attribute template to inspect
     * @return The type name of the attribute template.
     */
    const char*(CARB_ABI* getTypeName)(const AttributeTemplateObj& attributeObj);

    /**
     * Gets the AttributePortType of the attribute template. Use this to check whether
     * an attribute template is an input, output, or state.
     *
     * @param[in] attributeObj The attribute template to inspect
     * @return The AttributePortType of the attribute template.
     */
    AttributePortType(CARB_ABI* getPortType)(const AttributeTemplateObj& attributeObj);

    /**
     * Returns the extended type, if any, of the attribute template. Extended types are values such as "union"
     * and "any" that indicate an attribute template can hold a set of different types.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @return The extended type of the attribute template.
     */
    ExtendedAttributeType(CARB_ABI* getExtendedAttributeType)(const AttributeTemplateObj& attributeObj);

    /**
     * Returns the type of the attribute template object. If the extended type is
     * kExtendedAttributeType_Regular, this will return the assigned type to the port,
     * otherwise it will return type representing an unknown type to indicate multiple values exist.
     *
     * To fetch all support types, use getExtendedAttributeTypes.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @return The type of the attribute template.
     */
    omni::graph::core::Type(CARB_ABI* getAttributeType)(const AttributeTemplateObj& attributeObj);

    /**
     * Returns the number of compatible types with an attribute template. If the extended type is a union type,
     * returns the number of concrete types available. If the extended type is a regular type, this always
     * returns 1. If it is an "any", this will always return 0.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @return The number of compatible types
     */
    size_t(CARB_ABI* getExtendedAttributeTypeCount)(const AttributeTemplateObj& attributeObj);

    /**
     * Returns the list of compatible types. Note that 0 types are returned if the attribute template
     * extended type is "any".
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @param[out] typeBuffer A buffer to store the compatible types.
     * @param[in] typeBufferCount The number of types that can be stored in typeBuffer.
     * @return The number of compatible types copied to typeBuffer.
     */
    size_t(CARB_ABI* getExtendedAttributeTypes)(const AttributeTemplateObj& attributeObj,
                                                omni::graph::core::Type* typeBuffer,
                                                size_t typeBufferCount);

    /**
     * Sets the extended type of the attribute. Note that this may clear any default data for the type,
     * as default data is only valid for Regular typed attribute templates.
     *
     * When setting the extended type to be a regular type, use the setType function instead.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @param[in] attributeType The ExtendedAttributeType to change the template to.
     * @param[in] typeList For union types, a comma separated list of union names or types to support.
     */
    void(CARB_ABI* setExtendedType)(const AttributeTemplateObj& attributeObj,
                                    ExtendedAttributeType attributeType,
                                    const char* typeList);

    /**
     * Sets the attribute template to use a regular (non-extended) type.
     *
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @param[in] attributeType The type to set.
     * @param[in] defaultValuePtr Pointer to default data to set on this type. Can be null to indicate
     *                            no default data.
     * @param[in] defaultElemCount If the type is an array type, this is the number of items. It is ignored for
     *                             scalar types.
     */
    void(CARB_ABI* setType)(const AttributeTemplateObj& attributeObj,
                            omni::graph::core::Type attributeType,
                            const void* defaultValuePtr,
                            size_t defaultElemCount);

    /**
     * If an attribute template is a compound node type, make the template expose an attribute on the associated
     * compound graph.
     * If the attribute template is an output, only one connection is supported, and this will overwrite the connection.
     * If the attribute template is an input, this will append to the current list of connections.
     *
     * The connection must be on a child node of the graph template of the compound node type.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @param[in] path The path to attribute to connect to.
     * @returns True if the connection was successfully set, false otherwise.
     */
    bool(CARB_ABI* connectByPath)(const AttributeTemplateObj& attributeObj, const omni::fabric::PathC& path);

    /**
     * Disconnect all connections from the attribute template. Only works if the attribute template is
     * part of a compound node type
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     */
    void(CARB_ABI* disconnectAll)(const AttributeTemplateObj& attributeObj);

    /**
     * Disconnect an attribute from the attribute template. Only applies if the attribute template is
     * part of a compound node type.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @param[in] path The path to disconnect.
     * @returns True if the path was found and disconnected, false otherwise.
     */
    bool(CARB_ABI* disconnectByPath)(const AttributeTemplateObj& attributeObj, const omni::fabric::PathC& path);

    /**
     * Retrieves the number of connections for an attribute template. Only applies if the attribute template is
     * part of a compound node type.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     */
    size_t(CARB_ABI* getConnectionCount)(const AttributeTemplateObj& attributeObj);

    /**
     * Retrieves the connections for an attribute template. Only applies if the attribute template is part of a
     * compound node type.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @param[out] pathsBuffer Pointer to a buffer to store the paths
     * @param[in] pathBufferCount The number of paths that can be stored in pathsBuffer.
     */
    size_t(CARB_ABI* getConnections)(const AttributeTemplateObj& attributeObj,
                                     omni::fabric::PathC* pathsBuffer,
                                     size_t pathBufferCount);

    /**
     * Returns the number of metadata entries on this attribute template.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @return the number of metadata key/value pairs on this attribute
     */
    size_t(CARB_ABI* getMetadataCount)(const AttributeTemplateObj& attributeObj);

    /**
     * Returns the set of all metadata on this attribute template.
     *
     * The keyBuf and valueBuf arrays preallocated by the caller, and contain at least "getMetadataCount()"
     * entries in them.
     * All returned strings are owned by the node type and not to be destroyed.
     * The returned keyBuf and valueBuf must have exactly the same size with corresponding index values; that is
     * keyBuf[i] is the metadata name for the string in valueBuf[i].
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @param[out] keyBuf Buffer in which to put the list of metadata keys
     * @param[out] valueBuf Buffer in which to put the list of metadata values
     * @param[in] bufferSize the number of strings each of the two buffers is able to hold
     * @return Number of metadata items successfully populated
     */
    size_t(CARB_ABI* getAllMetadata)(const AttributeTemplateObj& attributeObj,
                                     const char** keyBuf,
                                     const char** valueBuf,
                                     size_t bufferSize);

    /**
     * Retrieves a metadata value from this attribute
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @param[in] key The name of the metadata to be retrieved
     * @return The value of the metadata, or nullptr if the named metadata was not set on this attribute
     */
    const char*(CARB_ABI* getMetadata)(const AttributeTemplateObj& attributeObj, const char* key);

    /**
     * Sets a metadata value on this attribute template. Metadata applied to an AttributeTemplate does
     * not apply to the underlying USD attribute itself, but will be applied to any corresponding
     * attributes that are created from it.
     *
     * @param[in] attrObj Reference to the attribute template.
     * @param[in] key The keyword, used as the name of the metadata. Cannot be null.
     * @param[in] value The value of the metadata. Only string values are supported. If null, clears the existing
     *                  metadata with the given key
     * @return true if the keyword was successfully set
     */
    bool(CARB_ABI* setMetadata)(const AttributeTemplateObj& attrObj, const char* key, const char* value);

    /** Retrieves a readonly memory buffer containing the default data set on this attribute templates.
     * If there is no default data set then nullptr will be returned.
     *
     * @param[in] attributeObj Reference to the AttributeTemplateObj representing the attribute template.
     * @return Structure containing the raw data storing the default value and information about the memory stored.
     *         The data format will change depending on the value returned by getAttributeType().
     */
    AttributeTemplateDefaultData(CARB_ABI* getDefaultData)(const AttributeTemplateObj& attributeObj);
};

// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IAttributeTemplate, getDefaultData, 18);

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/unstable/SafeDispatch.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file SafeDispatch.h //! //! @brief Declares a helper for OmniGraph types to run with serial initial frame. Also adds support for debugging //! options. #pragma once #include <omni/graph/exec/unstable/Executor.h> #include <omni/kit/exec/core/unstable/IExecutionContext.h> #include <omni/kit/exec/core/unstable/IExecutionGraphSettings.h> namespace omni { namespace graph { namespace core { namespace unstable { //! Class used to control globally how things should dispatch within OmniGraph struct SafeDispatch { //! Method called by executor to determine scheduling info static exec::unstable::SchedulingInfo getSchedulingInfo(const omni::graph::exec::unstable::ExecutionTask& task) { if (kit::exec::core::unstable::getExecutionGraphSettings()->shouldForceSerial()) return exec::unstable::SchedulingInfo::eSchedulerBypass; else if (getNeedsIsolation(task)) return exec::unstable::SchedulingInfo::eIsolate; else if (kit::exec::core::unstable::getExecutionGraphSettings()->shouldForceParallel()) return graph::exec::unstable::SchedulingInfo::eParallel; else return graph::exec::unstable::DefaultSchedulingStrategy::getSchedulingInfo(task); } //! Is task forced to execute in isolation. Currently used to stabilize the graph before parallel execution. 
static bool getNeedsIsolation(const omni::graph::exec::unstable::ExecutionTask& task) { auto* context = exec::unstable::cast<kit::exec::core::unstable::IExecutionContext>(task.getContext()); auto* nodeState = context->getStateInfo(task); const bool alreadyComputed = nodeState->getExecutionStamp().isValid(); nodeState->setExecutionStamp(context->getExecutionStamp()); return !alreadyComputed; } }; } // namespace unstable } // namespace core } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/core/unstable/IPrivateNodeDef.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPrivateNodeDef.h
//!
//! @brief Defines @ref omni::graph::core::unstable::IPrivateNodeDef.
#pragma once

#include <omni/graph/core/Handle.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class ExecutionTask;
}
}

namespace core
{
namespace unstable
{

// forward declarations needed by interface declaration
class IPrivateNodeDef;
class IPrivateNodeDef_abi;

//! This is a private interface. Rather than directly using this private interface,
//! access the functionality this interface provides by subclassing GenericNodeDef
class IPrivateNodeDef_abi
    : public omni::core::Inherits<exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.core.unstable.IPrivateNodeDef")>
{
protected:
    //! Acquire internal information about bucketing of instances for vectorized execution
    //!
    //! Arguments must not be @c nullptr.
    //!
    //! @param[in,out] info                 Task being executed for which instance info is queried.
    //! @param[out]    retBaseInstanceIndex Presumably the first instance index of the task's bucket --
    //!                                     TODO(review): confirm against the implementation.
    //! @param[out]    retNumberOfInstances Presumably the number of instances in the bucket --
    //!                                     TODO(review): confirm against the implementation.
    virtual void getInstanceInfo_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref")
                                         omni::graph::exec::unstable::ExecutionTask* info,
                                     OMNI_ATTR("out, not_null, ref") InstanceIndex* retBaseInstanceIndex,
                                     OMNI_ATTR("out, not_null, ref") size_t* retNumberOfInstances) noexcept = 0;
};

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/core/unstable/IPrivateNodeDef.gen.h>

// custom API declaration

//! @copydoc omni::graph::core::IPrivateNodeDef_abi
class omni::graph::core::unstable::IPrivateNodeDef
    : public omni::core::Generated<omni::graph::core::unstable::IPrivateNodeDef_abi>
{
};

// custom API implementation

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/core/unstable/IPrivateNodeDef.gen.h>
omniverse-code/kit/include/omni/graph/core/unstable/IGenericNodeDef.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGenericNodeDef.h
//!
//! @brief Defines @ref omni::graph::core::unstable::IGenericNodeDef.
#pragma once

#include <omni/graph/exec/unstable/INodeDef.h>

#include <omni/core/IObject.h>
#include <omni/graph/core/Handle.h>

namespace omni
{
namespace graph
{
namespace core
{
namespace unstable
{

// forward declarations needed by interface declaration
class IGenericNodeDef;
class IGenericNodeDef_abi;

//! Node definition interface for OmniGraph nodes.
//!
//! This interface allows us to reason about OmniGraph nodes during graph transformation phase and access authoring
//! node associated with this definition.
//!
//! Execution of this definition will callback to OmniGraph node compute method.
class IGenericNodeDef_abi
    : public omni::core::Inherits<exec::unstable::INodeDef, OMNI_TYPE_ID("omni.graph.core.unstable.IGenericNodeDef")>
{
protected:
    //! Returns a handle to authoring node associated with this definition.
    virtual NodeObj getAuthoringNode_abi() noexcept = 0;
};

//! Smart pointer managing an instance of @ref IGenericNodeDef.
using GenericNodeDefPtr = omni::core::ObjectPtr<IGenericNodeDef>;

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/core/unstable/IGenericNodeDef.gen.h>

// custom API declaration

//! @copydoc omni::graph::core::IGenericNodeDef_abi
class omni::graph::core::unstable::IGenericNodeDef
    : public omni::core::Generated<omni::graph::core::unstable::IGenericNodeDef_abi>
{
};

// additional headers needed for API implementation
// ...

// custom API implementation

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/core/unstable/IGenericNodeDef.gen.h>
omniverse-code/kit/include/omni/graph/core/unstable/ICompoundNodeType.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//
// This ABI is unstable and subject to change
//
// ============================== UNSTABLE ==============================
// Everything declared in this header may change without notice.
// ======================================================================

#pragma once

#include <omni/graph/core/unstable/IAttributeTemplate.h>

namespace omni
{
namespace graph
{
namespace core
{
namespace unstable
{

struct ICompoundNodeType;

//! Opaque handle identifying a compound node type. Shares the underlying
//! handle representation with regular node types.
class CompoundNodeTypeHandle : public HandleBase<NodeTypeHandle, CompoundNodeTypeHandle>
{
public:
    using HandleBase<NodeTypeHandle, CompoundNodeTypeHandle>::HandleBase;

    //! The reserved handle value representing "no node type".
    static constexpr NodeTypeHandle invalidValue()
    {
        return kInvalidNodeTypeHandle;
    }
};

//! Pairing of the ICompoundNodeType interface with the opaque handle it operates on.
struct CompoundNodeTypeObj
{
    const ICompoundNodeType* iNodeType; //!< interface to functionality on the compound node type
    CompoundNodeTypeHandle nodeTypeHandle; //!< opaque handle to actual underlying node type - managed by OmniGraph

    //! True when the handle refers to a valid compound node type.
    bool isValid() const
    {
        return nodeTypeHandle.isValid();
    }
};

/**
 * Carb interface for manipulating compound node type objects
 *
 */
struct ICompoundNodeType
{
    CARB_PLUGIN_INTERFACE("omni::graph::core::unstable::ICompoundNodeType", 0, 1);

    /**
     * Given an existing node type object, casts it to a compound node type object
     *
     * @param[in] nodeType Node Type object to convert
     * @returns A CompoundNodeTypeObj representing the same Node Type. If the Node Type
     *          is not a Compound Node Type, the returned object will not be valid.
     */
    CompoundNodeTypeObj(CARB_ABI* getCompoundNodeType)(const NodeTypeObj& nodeType);

    /**
     * Finds a compound node type by its USD SdfPath
     *
     * @param[in] path An SdfPath to the location of the corresponding UsdPrim representing the Compound Node Type
     * @returns An object representing the compound node type, or an invalid object
     *          if no compound node type exists at the given path
     */
    CompoundNodeTypeObj(CARB_ABI* findCompoundNodeTypeByPath)(const char* path);

    /**
     * Create a Compound Node Type object at the given path.
     *
     * @param[in] compoundName        The name of the compound to be created. This will be the name of the
     *                                compound USD object, and the default name in registration. Cannot be null
     *                                and must conform to USD naming standards.
     * @param[in] graphDefinitionName The name of OmniGraph definition created defining the function of the
     *                                compound. Cannot be null and must conform to USD naming standards.
     * @param[in] nameSpace           The namespace of the compound when registered in the node library. Can be
     *                                null, which indicates a default of "local.nodes".
     * @param[in] compoundFolder      Sdf style Path the compound folder to store the compound in. Can be null,
     *                                which indicates to use the default folder, which is a scope named "Compounds"
     *                                parented to the default prim of the stage. If the folder does not exist,
     *                                it will be created.
     * @param[in] evaluatorType       The type of the evaluator to associate with the graph
     * @returns A CompoundNodeType object.
     */
    CompoundNodeTypeObj(CARB_ABI* createCompoundNodeType)(const char* compoundName,
                                                          const char* graphDefinitionName,
                                                          const char* nameSpace,
                                                          const char* compoundFolder,
                                                          const char* evaluatorType);

    /**
     * Retrieves the default folder path where compound node types are placed on the stage.
     * This is typically a Usd::Scope under the default prim. e.g. "/World/Compounds".
     *
     * @returns The path to the folder.
     */
    omni::fabric::PathC(CARB_ABI* getDefaultFolder)();

    /**
     * Finds the input with the given name. The 'inputs:' prefix is optional.
     *
     * @param nodeType The compound node type to search
     * @param inputName The name to search for.
     *
     * @returns An AttributeTemplateObj representing the input. If the input is not found, the
     *          returned object's isValid() method will return false.
     */
    AttributeTemplateObj(CARB_ABI* findInput)(const CompoundNodeTypeObj& nodeType, const char* inputName);

    /**
     * Finds the output with the given name. The 'outputs:' prefix is optional.
     *
     * @param nodeType The compound node type to search
     * @param outputName The name to search for.
     *
     * @returns An AttributeTemplateObj representing the output. If the output is not found, the
     *          returned object's isValid() method will return false.
     */
    AttributeTemplateObj(CARB_ABI* findOutput)(const CompoundNodeTypeObj& nodeType, const char* outputName);

    /**
     * Removes an input with the given name. The 'inputs:' prefix is optional.
     *
     * @param nodeType The compound node type to remove an input from.
     * @param inputName The name of the input to remove.
     *
     * @returns True if the input was removed, false otherwise.
     */
    bool(CARB_ABI* removeInputByName)(const CompoundNodeTypeObj& nodeType, const char* inputName);

    /**
     * Removes an output with the given name. The 'outputs:' prefix is optional.
     *
     * @param nodeType The compound node type to remove an output from.
     * @param outputName The name of the output to remove.
     *
     * @returns True if the output was removed, false otherwise.
     */
    bool(CARB_ABI* removeOutputByName)(const CompoundNodeTypeObj& nodeType, const char* outputName);

    /**
     * Downcasts a CompoundNodeTypeObj to a regular NodeTypeObj.
     *
     * @param nodeType The compound node type to downcast.
     * @returns NodeType object also representing the compound node. If the CompoundNodeType object is not
     *          valid, an invalid NodeType object will be returned.
     */
    NodeTypeObj(CARB_ABI* asNodeType)(const CompoundNodeTypeObj& nodeType);

    /**
     * Get the number of input attribute templates on this node type
     *
     * @param nodeType The compound node type to fetch inputs from.
     * @returns The number of input attributes on the compound node type.
     */
    size_t(CARB_ABI* getInputCount)(const CompoundNodeTypeObj& nodeType);

    /**
     * Retrieves the input attributes from a compound node type.
     *
     * @param nodeType The compound node type to retrieve inputs from.
     * @param templateBuffer Buffer to hold the returned objects
     * @param bufferElementCount The number of objects the buffer can hold.
     *
     * @returns The number of items copied to bufferElementCount.
     */
    size_t(CARB_ABI* getInputs)(const CompoundNodeTypeObj& nodeType,
                                AttributeTemplateObj* templateBuffer,
                                size_t bufferElementCount);

    /**
     * Get the number of output attribute templates on this node type.
     *
     * @param nodeType The compound node type to fetch outputs from.
     * @returns The number of output attributes on the compound node type.
     */
    size_t(CARB_ABI* getOutputCount)(const CompoundNodeTypeObj& nodeType);

    /**
     * Retrieves the output attributes from a compound node type.
     *
     * @param nodeType The compound node type to retrieve outputs from.
     * @param templateBuffer Buffer to hold the returned objects
     * @param bufferElementCount The number of objects the buffer can hold.
     *
     * @returns The number of items copied to bufferElementCount.
     */
    size_t(CARB_ABI* getOutputs)(const CompoundNodeTypeObj& nodeType,
                                 AttributeTemplateObj* templateBuffer,
                                 size_t bufferElementCount);
};

// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(ICompoundNodeType, getOutputs, 12);

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/core/unstable/IPrivateNodeGraphDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPrivateNodeGraphDef.h
//!
//! @brief Defines @ref omni::graph::core::unstable::IPrivateNodeGraphDef.
#pragma once

#include <omni/graph/core/Handle.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IDef;
class INode;
class ExecutionTask;
}
}

namespace core
{
namespace unstable
{

// forward declarations needed by interface declaration
class IPrivateNodeGraphDef;
class IPrivateNodeGraphDef_abi;

//! This is a private interface. Rather than directly using this private interface,
//! access the functionality this interface provides by subclassing GenericGraphDef
class IPrivateNodeGraphDef_abi
    : public omni::core::Inherits<exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.core.unstable.IPrivateNodeGraphDef")>
{
protected:
    //! Returns a handle to authoring graph associated with this definition.
    virtual GraphObj getAuthoringGraph_abi() noexcept = 0;

    //! Returns True if this graph is used as part of instancing pipeline, false otherwise.
    //!
    //! @note We will most likely remove this before making this interface stable
    virtual bool isInstanced_abi() noexcept = 0;

    //! Internal binding to receive invalidation messages.
    //!
    //! @note Once OG will provide topology changed notifications we can register to, this should go away.
    virtual void attachToAuthoring_abi(OMNI_ATTR("not_null") omni::graph::exec::unstable::IDef* definition) noexcept = 0;

    //! Pre-execution call can be used to setup the graph state prior to execution or skip entirely the execution.
    //!
    //! The given task must not be @c nullptr.
    virtual omni::graph::exec::unstable::Status preExecute_abi(OMNI_ATTR(
        "in, out, not_null, throw_if_null, ref") omni::graph::exec::unstable::ExecutionTask* info) noexcept = 0;

    //! Post-execution call can be used to finalize the execution, e.g. transfer computation results to consumers.
    //!
    //! The given task must not be @c nullptr.
    virtual omni::graph::exec::unstable::Status postExecute_abi(OMNI_ATTR(
        "in, out, not_null, throw_if_null, ref") omni::graph::exec::unstable::ExecutionTask* info) noexcept = 0;

    //! Acquire internal information about bucketing of instances for vectorized execution
    //!
    //! Arguments must not be @c nullptr.
    //!
    //! @param[in]  info                 Task whose instance bucketing is being queried.
    //! @param[out] retBaseInstanceIndex Presumably the first instance index of the task's bucket --
    //!                                  TODO(review): confirm against the implementation.
    //! @param[out] retNumberOfInstances Presumably the number of instances in the bucket --
    //!                                  TODO(review): confirm against the implementation.
    virtual void getInstanceInfo_abi(OMNI_ATTR("in, not_null, throw_if_null, ref")
                                         omni::graph::exec::unstable::ExecutionTask const* info,
                                     OMNI_ATTR("out, not_null, ref")
                                         omni::graph::core::InstanceIndex* retBaseInstanceIndex,
                                     OMNI_ATTR("out, not_null, ref") size_t* retNumberOfInstances) noexcept = 0;
};

} // namespace unstable
} // namespace core
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/core/unstable/IPrivateNodeGraphDef.gen.h>

// custom API declaration

//! @copydoc omni::graph::core::IPrivateNodeGraphDef_abi
class omni::graph::core::unstable::IPrivateNodeGraphDef
    : public omni::core::Generated<omni::graph::core::unstable::IPrivateNodeGraphDef_abi>
{
};

// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INode.h>

// custom API implementation

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/core/unstable/IPrivateNodeGraphDef.gen.h>
omniverse-code/kit/include/omni/graph/core/cuda/Matrix2d.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // CUDA-compatible struct with functions for working with a 2x2 double matrix #include <float.h> struct Matrix2d { //! Default constructor. Leaves the matrix component values undefined. Matrix2d() = default; __device__ explicit Matrix2d(double s) { SetDiagonal(s); } __device__ Matrix2d& SetDiagonal(double s); __device__ Matrix2d& SetScale(double s); __device__ Matrix2d GetInverse() const; //! Post-multiplies matrix \e m into this matrix. __device__ Matrix2d& operator*=(const Matrix2d& m); //! Multiplies matrix \e m1 by \e m2. __device__ friend Matrix2d operator*(const Matrix2d& m1, const Matrix2d& m2) { Matrix2d tmp(m1); tmp *= m2; return tmp; } //! Matrix storage, in row-major order. 
double _mtx[2][2]; }; // Leaves the [2][2] element as 1 inline __device__ Matrix2d& Matrix2d::SetScale(double s) { _mtx[0][0] = s; _mtx[0][1] = 0.0; _mtx[1][0] = 0.0; _mtx[1][1] = s; return *this; } inline __device__ Matrix2d& Matrix2d::SetDiagonal(double s) { _mtx[0][0] = s; _mtx[0][1] = 0.0; _mtx[1][0] = 0.0; _mtx[1][1] = s; return *this; } inline __device__ Matrix2d Matrix2d::GetInverse() const { double x00, x01; double x10, x11; // Pickle values for computing determinants into registers x00 = _mtx[0][0]; x01 = _mtx[0][1]; x10 = _mtx[1][0]; x11 = _mtx[1][1]; // Compute the determinant double det = x00 * x11 - x01 * x10; Matrix2d inverse; double eps = 0; if (abs(det) > eps) { double rcp = 1.0 / det; // Multiply all 3x3 cofactors by reciprocal & transpose inverse._mtx[0][0] = x11 * rcp; inverse._mtx[0][1] = -x01 * rcp; inverse._mtx[1][0] = -x10 * rcp; inverse._mtx[1][1] = x00 * rcp; } else { inverse.SetScale(FLT_MAX); } return inverse; } inline __device__ Matrix2d& Matrix2d::operator*=(const Matrix2d& m) { // Save current values before they are overwritten Matrix2d tmp = *this; _mtx[0][0] = tmp._mtx[0][0] * m._mtx[0][0] + tmp._mtx[0][1] * m._mtx[1][0]; _mtx[0][1] = tmp._mtx[0][0] * m._mtx[0][1] + tmp._mtx[0][1] * m._mtx[1][1]; _mtx[1][0] = tmp._mtx[1][0] * m._mtx[0][0] + tmp._mtx[1][1] * m._mtx[1][0]; _mtx[1][1] = tmp._mtx[1][0] * m._mtx[0][1] + tmp._mtx[1][1] * m._mtx[1][1]; return *this; }
omniverse-code/kit/include/omni/graph/core/cuda/Matrix4d.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// CUDA-compatible struct with functions for working with a 4x4 double matrix

#include <float.h>

//! CUDA device-side 4x4 double-precision matrix, stored row-major.
struct Matrix4d
{
    //! Default constructor. Leaves the matrix component values undefined.
    Matrix4d() = default;

    //! Constructs a diagonal matrix with \p s on the whole diagonal.
    __device__ explicit Matrix4d(double s)
    {
        SetDiagonal(s);
    }

    //! Sets all four diagonal elements (including [3][3]) to \p s.
    __device__ Matrix4d& SetDiagonal(double s);

    //! Sets a uniform scale of \p s; the homogeneous [3][3] element is set to 1.
    __device__ Matrix4d& SetScale(double s);

    //! Returns the inverse of this matrix, or a uniform scale by FLT_MAX
    //! if the matrix is singular (determinant is zero).
    __device__ Matrix4d GetInverse() const;

    //! Post-multiplies matrix \e m into this matrix.
    __device__ Matrix4d& operator*=(const Matrix4d& m);

    //! Multiplies matrix \e m1 by \e m2.
    __device__ friend Matrix4d operator*(const Matrix4d& m1, const Matrix4d& m2)
    {
        Matrix4d tmp(m1);
        tmp *= m2;
        return tmp;
    }

    //! Matrix storage, in row-major order.
    double _mtx[4][4];
};

// Leaves the [3][3] element as 1
inline __device__ Matrix4d& Matrix4d::SetScale(double s)
{
    _mtx[0][0] = s;
    _mtx[0][1] = 0.0;
    _mtx[0][2] = 0.0;
    _mtx[0][3] = 0.0;
    _mtx[1][0] = 0.0;
    _mtx[1][1] = s;
    _mtx[1][2] = 0.0;
    _mtx[1][3] = 0.0;
    _mtx[2][0] = 0.0;
    _mtx[2][1] = 0.0;
    _mtx[2][2] = s;
    _mtx[2][3] = 0.0;
    _mtx[3][0] = 0.0;
    _mtx[3][1] = 0.0;
    _mtx[3][2] = 0.0;
    _mtx[3][3] = 1.0;
    return *this;
}

// Sets s on the entire diagonal, including the homogeneous [3][3] element.
inline __device__ Matrix4d& Matrix4d::SetDiagonal(double s)
{
    _mtx[0][0] = s;
    _mtx[0][1] = 0.0;
    _mtx[0][2] = 0.0;
    _mtx[0][3] = 0.0;
    _mtx[1][0] = 0.0;
    _mtx[1][1] = s;
    _mtx[1][2] = 0.0;
    _mtx[1][3] = 0.0;
    _mtx[2][0] = 0.0;
    _mtx[2][1] = 0.0;
    _mtx[2][2] = s;
    _mtx[2][3] = 0.0;
    _mtx[3][0] = 0.0;
    _mtx[3][1] = 0.0;
    _mtx[3][2] = 0.0;
    _mtx[3][3] = s;
    return *this;
}

// Inverse via cofactor (adjugate) expansion: build all 2x2 minors of two
// columns at a time, combine them into the sixteen 3x3 cofactors, then scale
// by the reciprocal of the determinant. The z-assignments below are
// order-sensitive because the y-values are reused for both column pairs.
inline __device__ Matrix4d Matrix4d::GetInverse() const
{
    double x00, x01, x02, x03;
    double x10, x11, x12, x13;
    double x20, x21, x22, x23;
    double x30, x31, x32, x33;
    double y01, y02, y03, y12, y13, y23;
    double z00, z10, z20, z30;
    double z01, z11, z21, z31;
    double z02, z03, z12, z13, z22, z23, z32, z33;

    // Pickle 1st two columns of matrix into registers
    x00 = _mtx[0][0];
    x01 = _mtx[0][1];
    x10 = _mtx[1][0];
    x11 = _mtx[1][1];
    x20 = _mtx[2][0];
    x21 = _mtx[2][1];
    x30 = _mtx[3][0];
    x31 = _mtx[3][1];

    // Compute all six 2x2 determinants of 1st two columns
    y01 = x00 * x11 - x10 * x01;
    y02 = x00 * x21 - x20 * x01;
    y03 = x00 * x31 - x30 * x01;
    y12 = x10 * x21 - x20 * x11;
    y13 = x10 * x31 - x30 * x11;
    y23 = x20 * x31 - x30 * x21;

    // Pickle last two columns of matrix into registers
    x02 = _mtx[0][2];
    x03 = _mtx[0][3];
    x12 = _mtx[1][2];
    x13 = _mtx[1][3];
    x22 = _mtx[2][2];
    x23 = _mtx[2][3];
    x32 = _mtx[3][2];
    x33 = _mtx[3][3];

    // Compute all 3x3 cofactors for 2nd two columns
    z33 = x02 * y12 - x12 * y02 + x22 * y01;
    z23 = x12 * y03 - x32 * y01 - x02 * y13;
    z13 = x02 * y23 - x22 * y03 + x32 * y02;
    z03 = x22 * y13 - x32 * y12 - x12 * y23;
    z32 = x13 * y02 - x23 * y01 - x03 * y12;
    z22 = x03 * y13 - x13 * y03 + x33 * y01;
    z12 = x23 * y03 - x33 * y02 - x03 * y23;
    z02 = x13 * y23 - x23 * y13 + x33 * y12;

    // Compute all six 2x2 determinants of 2nd two columns
    // (y-values are intentionally overwritten here)
    y01 = x02 * x13 - x12 * x03;
    y02 = x02 * x23 - x22 * x03;
    y03 = x02 * x33 - x32 * x03;
    y12 = x12 * x23 - x22 * x13;
    y13 = x12 * x33 - x32 * x13;
    y23 = x22 * x33 - x32 * x23;

    // Compute all 3x3 cofactors for 1st two columns
    z30 = x11 * y02 - x21 * y01 - x01 * y12;
    z20 = x01 * y13 - x11 * y03 + x31 * y01;
    z10 = x21 * y03 - x31 * y02 - x01 * y23;
    z00 = x11 * y23 - x21 * y13 + x31 * y12;
    z31 = x00 * y12 - x10 * y02 + x20 * y01;
    z21 = x10 * y03 - x30 * y01 - x00 * y13;
    z11 = x00 * y23 - x20 * y03 + x30 * y02;
    z01 = x20 * y13 - x30 * y12 - x10 * y23;

    // compute 4x4 determinant & its reciprocal
    double det = x30 * z30 + x20 * z20 + x10 * z10 + x00 * z00;

    Matrix4d inverse;
    double eps = 0;
    if (abs(det) > eps)
    {
        double rcp = 1.0 / det;
        // Multiply all 3x3 cofactors by reciprocal & transpose
        inverse._mtx[0][0] = z00 * rcp;
        inverse._mtx[0][1] = z10 * rcp;
        inverse._mtx[1][0] = z01 * rcp;
        inverse._mtx[0][2] = z20 * rcp;
        inverse._mtx[2][0] = z02 * rcp;
        inverse._mtx[0][3] = z30 * rcp;
        inverse._mtx[3][0] = z03 * rcp;
        inverse._mtx[1][1] = z11 * rcp;
        inverse._mtx[1][2] = z21 * rcp;
        inverse._mtx[2][1] = z12 * rcp;
        inverse._mtx[1][3] = z31 * rcp;
        inverse._mtx[3][1] = z13 * rcp;
        inverse._mtx[2][2] = z22 * rcp;
        inverse._mtx[2][3] = z32 * rcp;
        inverse._mtx[3][2] = z23 * rcp;
        inverse._mtx[3][3] = z33 * rcp;
    }
    else
    {
        // Singular matrix: return a huge uniform scale as a sentinel.
        inverse.SetScale(FLT_MAX);
    }
    return inverse;
}

inline __device__ Matrix4d& Matrix4d::operator*=(const Matrix4d& m)
{
    // Save current values before they are overwritten
    Matrix4d tmp = *this;

    _mtx[0][0] = tmp._mtx[0][0] * m._mtx[0][0] + tmp._mtx[0][1] * m._mtx[1][0] + tmp._mtx[0][2] * m._mtx[2][0] +
                 tmp._mtx[0][3] * m._mtx[3][0];

    _mtx[0][1] = tmp._mtx[0][0] * m._mtx[0][1] + tmp._mtx[0][1] * m._mtx[1][1] + tmp._mtx[0][2] * m._mtx[2][1] +
                 tmp._mtx[0][3] * m._mtx[3][1];

    _mtx[0][2] = tmp._mtx[0][0] * m._mtx[0][2] + tmp._mtx[0][1] * m._mtx[1][2] + tmp._mtx[0][2] * m._mtx[2][2] +
                 tmp._mtx[0][3] * m._mtx[3][2];

    _mtx[0][3] = tmp._mtx[0][0] * m._mtx[0][3] + tmp._mtx[0][1] * m._mtx[1][3] + tmp._mtx[0][2] * m._mtx[2][3] +
                 tmp._mtx[0][3] * m._mtx[3][3];

    _mtx[1][0] = tmp._mtx[1][0] * m._mtx[0][0] + tmp._mtx[1][1] * m._mtx[1][0] + tmp._mtx[1][2] * m._mtx[2][0] +
                 tmp._mtx[1][3] * m._mtx[3][0];

    _mtx[1][1] = tmp._mtx[1][0] * m._mtx[0][1] + tmp._mtx[1][1] * m._mtx[1][1] + tmp._mtx[1][2] * m._mtx[2][1] +
                 tmp._mtx[1][3] * m._mtx[3][1];

    _mtx[1][2] = tmp._mtx[1][0] * m._mtx[0][2] + tmp._mtx[1][1] * m._mtx[1][2] + tmp._mtx[1][2] * m._mtx[2][2] +
                 tmp._mtx[1][3] * m._mtx[3][2];

    _mtx[1][3] = tmp._mtx[1][0] * m._mtx[0][3] + tmp._mtx[1][1] * m._mtx[1][3] + tmp._mtx[1][2] * m._mtx[2][3] +
                 tmp._mtx[1][3] * m._mtx[3][3];

    _mtx[2][0] = tmp._mtx[2][0] * m._mtx[0][0] + tmp._mtx[2][1] * m._mtx[1][0] + tmp._mtx[2][2] * m._mtx[2][0] +
                 tmp._mtx[2][3] * m._mtx[3][0];

    _mtx[2][1] = tmp._mtx[2][0] * m._mtx[0][1] + tmp._mtx[2][1] * m._mtx[1][1] + tmp._mtx[2][2] * m._mtx[2][1] +
                 tmp._mtx[2][3] * m._mtx[3][1];

    _mtx[2][2] = tmp._mtx[2][0] * m._mtx[0][2] + tmp._mtx[2][1] * m._mtx[1][2] + tmp._mtx[2][2] * m._mtx[2][2] +
                 tmp._mtx[2][3] * m._mtx[3][2];

    _mtx[2][3] = tmp._mtx[2][0] * m._mtx[0][3] + tmp._mtx[2][1] * m._mtx[1][3] + tmp._mtx[2][2] * m._mtx[2][3] +
                 tmp._mtx[2][3] * m._mtx[3][3];

    _mtx[3][0] = tmp._mtx[3][0] * m._mtx[0][0] + tmp._mtx[3][1] * m._mtx[1][0] + tmp._mtx[3][2] * m._mtx[2][0] +
                 tmp._mtx[3][3] * m._mtx[3][0];

    _mtx[3][1] = tmp._mtx[3][0] * m._mtx[0][1] + tmp._mtx[3][1] * m._mtx[1][1] + tmp._mtx[3][2] * m._mtx[2][1] +
                 tmp._mtx[3][3] * m._mtx[3][1];

    _mtx[3][2] = tmp._mtx[3][0] * m._mtx[0][2] + tmp._mtx[3][1] * m._mtx[1][2] + tmp._mtx[3][2] * m._mtx[2][2] +
                 tmp._mtx[3][3] * m._mtx[3][2];

    _mtx[3][3] = tmp._mtx[3][0] * m._mtx[0][3] + tmp._mtx[3][1] * m._mtx[1][3] + tmp._mtx[3][2] * m._mtx[2][3] +
                 tmp._mtx[3][3] * m._mtx[3][3];

    return *this;
}

// Extracts the translation of a row-major transform (row 3) as a float3.
inline __device__ static float3 getTranslation(const Matrix4d& src)
{
    return float3{ float(src._mtx[3][0]), float(src._mtx[3][1]), float(src._mtx[3][2]) };
}

// Extracts the rotation of the upper-left 3x3 block as a quaternion (x,y,z,w),
// using the standard trace-based method with the index-cycling fallback for
// numerically unstable traces.
// NOTE(review): the commented-out removeScale suggests this assumes src has
// no scale component — confirm callers strip scale beforehand.
inline __device__ static float4 getRotation(const Matrix4d& src)
{
    // if (!removeScale(src))
    //{
    //    return carb::Float4{0.0f, 0.0f, 0.0f, 1.0f};
    //}

    float tr = float(src._mtx[0][0] + src._mtx[1][1] + src._mtx[2][2]);
    if (tr >= 0.0)
    {
        float s = sqrtf(tr + 1.0f); // better use invsqrt, but didn't find the fast API....
        float4 result;
        result.w = 0.5f * s;
        s = 0.5f / s;
        // Off-diagonal differences give the axis components; the index order
        // here matches the row-major (row-vector) convention of Matrix4d.
        result.x = float(src._mtx[1][2] - src._mtx[2][1]) * s;
        result.y = float(src._mtx[2][0] - src._mtx[0][2]) * s;
        result.z = float(src._mtx[0][1] - src._mtx[1][0]) * s;
        return result;
    }
    else
    {
        // Trace is negative: pivot on the largest diagonal element to keep
        // the square root well-conditioned.
        int i = 0;
        if (src._mtx[1][1] > src._mtx[0][0])
            i = 1;
        if (src._mtx[2][2] > src._mtx[i][i])
            i = 2;
        static constexpr int next[3] = { 1, 2, 0 };
        int j = next[i];
        int k = next[j];
        float s = float(sqrt(src._mtx[i][i] - src._mtx[j][j] - src._mtx[k][k] + 1.0f));
        float quat[4];
        quat[i] = 0.5f * s;
        s = 0.5f / s;
        quat[j] = float(src._mtx[i][j] + src._mtx[j][i]) * s;
        quat[k] = float(src._mtx[k][i] + src._mtx[i][k]) * s;
        quat[3] = float(src._mtx[j][k] - src._mtx[k][j]) * s;
        return float4{ quat[0], quat[1], quat[2], quat[3] };
    }
}

// Hamilton product q0 * q1 of two quaternions stored as (x, y, z, w).
inline __device__ static float4 quatMultiply(const float4& q0, const float4& q1)
{
    float4 result;
    result.x = q0.w * q1.x + q0.x * q1.w + q0.y * q1.z - q0.z * q1.y;
    result.y = q0.w * q1.y - q0.x * q1.z + q0.y * q1.w + q0.z * q1.x;
    result.z = q0.w * q1.z + q0.x * q1.y - q0.y * q1.x + q0.z * q1.w;
    result.w = q0.w * q1.w - q0.x * q1.x - q0.y * q1.y - q0.z * q1.z;
    return result;
}

// Converts a transform to a dual quaternion laid out in dst as 8 floats:
// dst[0..3] = normalized rotation quaternion (x, y, z, w),
// dst[4..7] = dual part, 0.5 * (t as pure quaternion) * q.
inline __device__ static void matrixToDualQuat(float* dst, const Matrix4d& src)
{
    float3 trans = getTranslation(src);
    float4 quat = getRotation(src);

    // Normalize the rotation part before building the dual part.
    float invQuatLength = 1 / sqrt(quat.x * quat.x + quat.y * quat.y + quat.z * quat.z + quat.w * quat.w);
    dst[0] = quat.x * invQuatLength;
    dst[1] = quat.y * invQuatLength;
    dst[2] = quat.z * invQuatLength;
    dst[3] = quat.w * invQuatLength;

    // Translation embedded as a pure quaternion (w = 0).
    float4 quat2{ trans.x, trans.y, trans.z, 0.0f };
    float4 dual = quatMultiply(quat2, float4{ dst[0], dst[1], dst[2], dst[3] });
    dst[4] = dual.x * 0.5f;
    dst[5] = dual.y * 0.5f;
    dst[6] = dual.z * 0.5f;
    dst[7] = dual.w * 0.5f;
}
omniverse-code/kit/include/omni/graph/core/cuda/Matrix3d.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // CUDA-compatible struct with functions for working with a 3x3 double matrix #include <float.h> struct Matrix3d { //! Default constructor. Leaves the matrix component values undefined. Matrix3d() = default; __device__ explicit Matrix3d(double s) { SetDiagonal(s); } __device__ Matrix3d& SetDiagonal(double s); __device__ Matrix3d& SetScale(double s); __device__ Matrix3d GetInverse() const; //! Post-multiplies matrix \e m into this matrix. __device__ Matrix3d& operator*=(const Matrix3d& m); //! Multiplies matrix \e m1 by \e m2. __device__ friend Matrix3d operator*(const Matrix3d& m1, const Matrix3d& m2) { Matrix3d tmp(m1); tmp *= m2; return tmp; } //! Matrix storage, in row-major order. 
double _mtx[3][3]; }; // Leaves the [2][2] element as 1 inline __device__ Matrix3d& Matrix3d::SetScale(double s) { _mtx[0][0] = s; _mtx[0][1] = 0.0; _mtx[0][2] = 0.0; _mtx[1][0] = 0.0; _mtx[1][1] = s; _mtx[1][2] = 0.0; _mtx[2][0] = 0.0; _mtx[2][1] = 0.0; _mtx[2][2] = s; return *this; } inline __device__ Matrix3d& Matrix3d::SetDiagonal(double s) { _mtx[0][0] = s; _mtx[0][1] = 0.0; _mtx[0][2] = 0.0; _mtx[1][0] = 0.0; _mtx[1][1] = s; _mtx[1][2] = 0.0; _mtx[2][0] = 0.0; _mtx[2][1] = 0.0; _mtx[2][2] = s; return *this; } inline __device__ Matrix3d Matrix3d::GetInverse() const { double x00, x01, x02; double x10, x11, x12; double x20, x21, x22; double det00, det01, det02; double det10, det11, det12; double det20, det21, det22; // Pickle values for computing determinants into registers x00 = _mtx[0][0]; x01 = _mtx[0][1]; x02 = _mtx[0][2]; x10 = _mtx[1][0]; x11 = _mtx[1][1]; x12 = _mtx[1][2]; x20 = _mtx[2][0]; x21 = _mtx[2][1]; x22 = _mtx[2][2]; // Compute the determinants of the transposed matrix det00 = x11 * x22 - x21 * x12; det01 = x02 * x21 - x01 * x22; det02 = x01 * x12 - x02 * x11; det10 = x12 * x20 - x10 * x22; det11 = x00 * x22 - x02 * x20; det12 = x10 * x02 - x00 * x12; det20 = x10 * x21 - x20 * x11; det21 = x20 * x01 - x00 * x21; det22 = x00 * x11 - x10 * x01; // compute determinant from the first row double det = x00 * det00 + x01 * det10 + x02 * det20; Matrix3d inverse; double eps = 0; if (abs(det) > eps) { double rcp = 1.0 / det; // Multiply all 3x3 cofactors by reciprocal & transpose inverse._mtx[0][0] = det00 * rcp; inverse._mtx[0][1] = det01 * rcp; inverse._mtx[1][0] = det10 * rcp; inverse._mtx[0][2] = det02 * rcp; inverse._mtx[2][0] = det20 * rcp; inverse._mtx[1][1] = det11 * rcp; inverse._mtx[1][2] = det12 * rcp; inverse._mtx[2][1] = det21 * rcp; inverse._mtx[2][2] = det22 * rcp; } else { inverse.SetScale(FLT_MAX); } return inverse; } inline __device__ Matrix3d& Matrix3d::operator*=(const Matrix3d& m) { // Save current values before they are overwritten 
Matrix3d tmp = *this; _mtx[0][0] = tmp._mtx[0][0] * m._mtx[0][0] + tmp._mtx[0][1] * m._mtx[1][0] + tmp._mtx[0][2] * m._mtx[2][0]; _mtx[0][1] = tmp._mtx[0][0] * m._mtx[0][1] + tmp._mtx[0][1] * m._mtx[1][1] + tmp._mtx[0][2] * m._mtx[2][1]; _mtx[0][2] = tmp._mtx[0][0] * m._mtx[0][2] + tmp._mtx[0][1] * m._mtx[1][2] + tmp._mtx[0][2] * m._mtx[2][2]; _mtx[1][0] = tmp._mtx[1][0] * m._mtx[0][0] + tmp._mtx[1][1] * m._mtx[1][0] + tmp._mtx[1][2] * m._mtx[2][0]; _mtx[1][1] = tmp._mtx[1][0] * m._mtx[0][1] + tmp._mtx[1][1] * m._mtx[1][1] + tmp._mtx[1][2] * m._mtx[2][1]; _mtx[1][2] = tmp._mtx[1][0] * m._mtx[0][2] + tmp._mtx[1][1] * m._mtx[1][2] + tmp._mtx[1][2] * m._mtx[2][2]; _mtx[2][0] = tmp._mtx[2][0] * m._mtx[0][0] + tmp._mtx[2][1] * m._mtx[1][0] + tmp._mtx[2][2] * m._mtx[2][0]; _mtx[2][1] = tmp._mtx[2][0] * m._mtx[0][1] + tmp._mtx[2][1] * m._mtx[1][1] + tmp._mtx[2][2] * m._mtx[2][1]; _mtx[2][2] = tmp._mtx[2][0] * m._mtx[0][2] + tmp._mtx[2][1] * m._mtx[1][2] + tmp._mtx[2][2] * m._mtx[2][2]; return *this; }
omniverse-code/kit/include/omni/graph/core/cuda/CUDAUtils.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // Collection of CUDA math utilities #include "cuda_runtime.h" using double44 = double[4][4]; using float44 = float[4][4]; // =============================================================== // Return the dot product of two float4s inline __host__ __device__ float dot(const float4& a, const float4& b) { return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; } // =============================================================== // Return the elementwise sum of two float4s inline __host__ __device__ float4 operator+(const float4& a, const float4& b) { return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } // =============================================================== // Return the vector product of two float4s inline __host__ __device__ float4 operator*(const float4& a, const float& s) { return make_float4(a.x * s, a.y * s, a.z * s, a.w * s); } // =============================================================== // Return the product of a float4 vector and a 4x4 matrix inline __host__ __device__ float4 operator*(const float4& p, const double44& m) { float4 result; result.x = (float)((double)p.x * m[0][0] + (double)p.y * m[1][0] + (double)p.z * m[2][0] + (double)p.w * m[3][0]); result.y = (float)((double)p.x * m[0][1] + (double)p.y * m[1][1] + (double)p.z * m[2][1] + (double)p.w * m[3][1]); result.z = (float)((double)p.x * m[0][2] + (double)p.y * m[1][2] + (double)p.z * m[2][2] + (double)p.w * m[3][2]); result.w = (float)((double)p.x * m[0][3] + (double)p.y * m[1][3] + (double)p.z * m[2][3] + (double)p.w * m[3][3]); 
return result; } // =============================================================== // Return the product of a float4 vector and a 4x4 float matrix inline __host__ __device__ float4 operator*(const float4& p, const float44& m) { float4 result; result.x = p.x * m[0][0] + p.y * m[1][0] + p.z * m[2][0] + p.w * m[3][0]; result.y = p.x * m[0][1] + p.y * m[1][1] + p.z * m[2][1] + p.w * m[3][1]; result.z = p.x * m[0][2] + p.y * m[1][2] + p.z * m[2][2] + p.w * m[3][2]; result.w = p.x * m[0][3] + p.y * m[1][3] + p.z * m[2][3] + p.w * m[3][3]; return result; } // =============================================================== // Return a float4 with it's w-scale applied and set to 1.0 inline __host__ __device__ float4 homogenize(const float4& p) { float inv = (abs(p.w)) > 1e-6f ? 1.0f / p.w : 1.0f; return p * inv; } // =============================================================== // Return the dot product of two float3s inline __host__ __device__ float dot(const float3& a, const float3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; } // =============================================================== // Return the elementwise product of two float3s inline __host__ __device__ float3 hadamard_product(const float3& a, const float3& b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } // =============================================================== // Return the cross product of two float3s inline __host__ __device__ float3 cross(const float3& a, const float3& b) { return make_float3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); } // =============================================================== // Return the elementwise sum of two float3s inline __host__ __device__ float3 operator+(const float3& a, const float3& b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); } // =============================================================== // Return the elementwise difference between two float3s inline __host__ __device__ float3 operator-(const 
float3& a, const float3& b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } // =============================================================== // Return the product of a float3 and a constant inline __host__ __device__ float3 operator*(const float3& a, const float& s) { return make_float3(a.x * s, a.y * s, a.z * s); } // =============================================================== // Return the dot product of two double4s inline __host__ __device__ double dot(const double4& a, const double4& b) { return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; } // =============================================================== // Return the elementwise sum of two double4s inline __host__ __device__ double4 operator+(const double4& a, const double4& b) { return make_double4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } // =============================================================== // Return the vector product of two double4s inline __host__ __device__ double4 operator*(const double4& a, const double& s) { return make_double4(a.x * s, a.y * s, a.z * s, a.w * s); } // =============================================================== // Return the product of a double4 vector and a 4x4 matrix inline __host__ __device__ double4 operator*(const double4& p, const double44& m) { double4 result; result.x = (double)((double)p.x * m[0][0] + (double)p.y * m[1][0] + (double)p.z * m[2][0] + (double)p.w * m[3][0]); result.y = (double)((double)p.x * m[0][1] + (double)p.y * m[1][1] + (double)p.z * m[2][1] + (double)p.w * m[3][1]); result.z = (double)((double)p.x * m[0][2] + (double)p.y * m[1][2] + (double)p.z * m[2][2] + (double)p.w * m[3][2]); result.w = (double)((double)p.x * m[0][3] + (double)p.y * m[1][3] + (double)p.z * m[2][3] + (double)p.w * m[3][3]); return result; } // =============================================================== // Return a double4 with it's w-scale applied and set to 1.0 inline __host__ __device__ double4 homogenize(const double4& p) { double inv = 
(abs(p.w)) > 1e-6f ? 1.0f / p.w : 1.0f; return p * inv; } // =============================================================== // Return the dot product of two double3s inline __host__ __device__ double dot(const double3& a, const double3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; } // =============================================================== // Return the elementwise product of two double3s inline __host__ __device__ double3 hadamard_product(const double3& a, const double3& b) { return make_double3(a.x * b.x, a.y * b.y, a.z * b.z); } // =============================================================== // Return the cross product of two double3s inline __host__ __device__ double3 cross(const double3& a, const double3& b) { return make_double3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); } // =============================================================== // Return the elementwise sum of two double3s inline __host__ __device__ double3 operator+(const double3& a, const double3& b) { return make_double3(a.x + b.x, a.y + b.y, a.z + b.z); } // =============================================================== // Return the elementwise difference between two double3s inline __host__ __device__ double3 operator-(const double3& a, const double3& b) { return make_double3(a.x - b.x, a.y - b.y, a.z - b.z); } // =============================================================== // Return the product of a double3 and a constant inline __host__ __device__ double3 operator*(const double3& a, const double& s) { return make_double3(a.x * s, a.y * s, a.z * s); }
omniverse-code/kit/include/omni/graph/core/bundle/IConstBundle2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Provide read only access to recursive bundles. template <> class omni::core::Generated<omni::graph::core::IConstBundle2_abi> : public omni::graph::core::IConstBundle2_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::IConstBundle2") //! Return true if this bundle is valid, false otherwise. bool isValid() noexcept; //! Return the context of this bundle. omni::graph::core::GraphContextObj getContext() noexcept; //! Return Handle to this bundle. Invalid handle is returned if this bundle is invalid. omni::graph::core::ConstBundleHandle getConstHandle() noexcept; //! Return full path of this bundle. omni::fabric::PathC getPath() noexcept; //! Return name of this bundle omni::graph::core::NameToken getName() noexcept; //! Return handle to the parent of this bundle. Invalid handle is returned if bundle has no parent. omni::graph::core::ConstBundleHandle getConstParentBundle() noexcept; //! @brief Get the names and types of all attributes in this bundle. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when names and types are `nullptr`. When in this mode, *nameAndTypeCount //! 
will be populated with the number of attributes in the bundle. //! //! **Get mode** is enabled when names or types is not `nullptr`. Upon entering the function, *nameAndTypeCount //! stores the number of entries in names and types. In **Get mode** names are not nullptr, names array is populated //! with attribute names. In **Get mode** types are not nullptr, types array is populated with attribute types. //! //! @param names The names of the attributes. //! @param types The types of the attributes. //! @param nameAndTypeCount must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getAttributeNamesAndTypes(omni::graph::core::NameToken* const names, omni::graph::core::Type* const types, size_t* const nameAndTypeCount) noexcept; //! @brief Get read only handles to all attributes in this bundle. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when attributes is `nullptr`. When in this mode, *attributeCount //! will be populated with the number of attributes in the bundle. //! //! **Get mode** is enabled when attributes is not `nullptr`. Upon entering the function, *attributeCount //! stores the number of entries in attributes. //! In **Get mode** attributes are not nullptr, attributes array is populated with attribute handles in the bundle. //! //! @param attributes The buffer to store handles of the attributes in this bundle. //! @param attributeCount Size of attributes buffer. Must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstAttributes(omni::graph::core::ConstAttributeDataHandle* const attributes, size_t* const attributeCount) noexcept; //! @brief Search for read only handles of the attribute in this bundle by using attribute names. //! //! 
@param names The name of the attributes to be searched for. //! @param nameCount Size of names buffer. //! @param attributes The buffer to store handles of the attributes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstAttributesByName(const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::ConstAttributeDataHandle* const attributes) noexcept; //! @brief Get read only handles to all child bundles in this bundle. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when bundles is `nullptr`. When in this mode, *bundleCount //! will be populated with the number of bundles in the bundle. //! //! **Get mode** is enabled when bundles is not `nullptr`. Upon entering the function, *bundleCount //! stores the number of entries in bundles. //! In **Get mode** bundles are not nullptr, bundles array is populated with bundle handles in the bundle. //! //! @param bundles The buffer to save child bundle handles. //! @param bundleCount Size of the bundles buffer. Must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstChildBundles(omni::graph::core::ConstBundleHandle* const bundles, size_t* const bundleCount) noexcept; //! @brief Get read only handle to child bundle by index. //! //! @param bundleIndex Bundle index in range [0, childBundleCount). //! @param bundle Handle under the index. If bundle index is out of range, then invalid handle is returned. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstChildBundle(size_t bundleIndex, omni::graph::core::ConstBundleHandle* const bundle) noexcept; //! 
@brief Lookup for read only handles to child bundles under specified names. //! //! For children that are not found invalid handles are returned. //! //! @param names The names of the child bundles in this bundle. //! @param nameCount The number of child bundles to be searched. //! @param foundBundles Output handles to the found bundles. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstChildBundlesByName(const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::ConstBundleHandle* const foundBundles) noexcept; //! @deprecated Metadata storage is deprecated and invalid handle is returned. omni::graph::core::ConstBundleHandle getConstMetadataStorage() noexcept; //! @brief Get the names and types of all bundle metadata fields in this bundle. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when fieldNames and fieldTypes are `nullptr`. When in this mode, *fieldCount //! will be populated with the number of metadata fields in this bundle. //! //! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function, //! *fieldCount stores the number of entries in fieldNames and @p fieldTypes. //! //! In **Get mode** fieldNames are not `nullptr`, fieldNames array is populated with field names. //! In **Get mode** fieldTypes are not `nullptr`, fieldTypes array is populated with field types. //! //! @param fieldNames Output field names in this bundle. //! @param fieldTypes Output field types in this bundle. //! @param fieldCount must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
omni::core::Result getBundleMetadataNamesAndTypes(omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept; //! @brief Search for field handles in this bundle by using field names. //! //!@param fieldNames Name of bundle metadata fields to be searched for. //!@param fieldCount Size of fieldNames and bundleMetadata arrays. //!@param bundleMetadata Handle to metadata fields in this bundle. //!@return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstBundleMetadataByName(const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::ConstAttributeDataHandle* const bundleMetadata) noexcept; //! @brief Get the names and types of all attribute metadata fields in the attribute. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when fieldNames and @p fieldTypes are `nullptr`. When in this mode, *fieldCount //! will be populated with the number of metadata fields in the attribute. //! //! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function, //! *fieldCount stores the number of entries in fieldNames and fieldTypes. //! //! In **Get mode** fieldNames are not `nullptr`, fieldNames array is populated with field names. //! In **Get mode** fieldTypes are not `nullptr`, fieldTypes array is populated with field types. //! //! @param attribute Name of the attribute. //! @param fieldNames Output field names in the attribute. //! @param fieldTypes Output field types in the attribute. //! @param fieldCount must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
omni::core::Result getAttributeMetadataNamesAndTypes(omni::graph::core::NameToken attribute, omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept; //! @brief Search for read only field handles in the attribute by using field names. //! //! @param attribute The name of the attribute. //! @param fieldNames The names of attribute metadata fields to be searched for. //! @param fieldCount Size of fieldNames and attributeMetadata arrays. //! @param attributeMetadata Handles to attribute metadata fields in the attribute. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstAttributeMetadataByName( omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::ConstAttributeDataHandle* const attributeMetadata) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline bool omni::core::Generated<omni::graph::core::IConstBundle2_abi>::isValid() noexcept { return isValid_abi(); } inline omni::graph::core::GraphContextObj omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getContext() noexcept { return getContext_abi(); } inline omni::graph::core::ConstBundleHandle omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstHandle() noexcept { return getConstHandle_abi(); } inline omni::fabric::PathC omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getPath() noexcept { return getPath_abi(); } inline omni::graph::core::NameToken omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getName() noexcept { return getName_abi(); } inline omni::graph::core::ConstBundleHandle omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstParentBundle() noexcept { return getConstParentBundle_abi(); } inline omni::core::Result 
omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getAttributeNamesAndTypes( omni::graph::core::NameToken* const names, omni::graph::core::Type* const types, size_t* const nameAndTypeCount) noexcept { return getAttributeNamesAndTypes_abi(names, types, nameAndTypeCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributes( omni::graph::core::ConstAttributeDataHandle* const attributes, size_t* const attributeCount) noexcept { return getConstAttributes_abi(attributes, attributeCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributesByName( const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::ConstAttributeDataHandle* const attributes) noexcept { return getConstAttributesByName_abi(names, nameCount, attributes); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundles( omni::graph::core::ConstBundleHandle* const bundles, size_t* const bundleCount) noexcept { return getConstChildBundles_abi(bundles, bundleCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundle( size_t bundleIndex, omni::graph::core::ConstBundleHandle* const bundle) noexcept { return getConstChildBundle_abi(bundleIndex, bundle); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundlesByName( const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::ConstBundleHandle* const foundBundles) noexcept { return getConstChildBundlesByName_abi(names, nameCount, foundBundles); } inline omni::graph::core::ConstBundleHandle omni::core::Generated< omni::graph::core::IConstBundle2_abi>::getConstMetadataStorage() noexcept { return getConstMetadataStorage_abi(); } inline omni::core::Result 
omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getBundleMetadataNamesAndTypes( omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept { return getBundleMetadataNamesAndTypes_abi(fieldNames, fieldTypes, fieldCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstBundleMetadataByName( const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::ConstAttributeDataHandle* const bundleMetadata) noexcept { return getConstBundleMetadataByName_abi(fieldNames, fieldCount, bundleMetadata); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getAttributeMetadataNamesAndTypes( omni::graph::core::NameToken attribute, omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept { return getAttributeMetadataNamesAndTypes_abi(attribute, fieldNames, fieldTypes, fieldCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributeMetadataByName( omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::ConstAttributeDataHandle* const attributeMetadata) noexcept { return getConstAttributeMetadataByName_abi(attribute, fieldNames, fieldCount, attributeMetadata); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/bundle/IBundleFactory2.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "IBundleFactory1.h" namespace omni { namespace graph { namespace core { OMNI_DECLARE_INTERFACE(IBundleFactory2); //! IBundleFactory version 2. //! //! The version 2 allows to retrieve instances of IBundle instances from paths. class IBundleFactory2_abi : public omni::core::Inherits<omni::graph::core::IBundleFactory, OMNI_TYPE_ID("omni.graph.core.IBundleFactory2")> { protected: //! Get read only IBundle interface from path. //! //! @param contextObj The context where bundles belong to. //! @param paths Input paths. //! @param pathCount Length of paths array. //! @param bundles Output instances of IConstBundle2 interface. //! //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result getConstBundlesFromPaths_abi( GraphContextObj const* const contextObj OMNI_ATTR("in, not_null"), omni::fabric::PathC const* const paths OMNI_ATTR("in, not_null"), size_t pathCount, IConstBundle2** const bundles OMNI_ATTR("out, *not_null, count=pathCount")) noexcept = 0; //! Get read write IBundle interface from path. //! //! @param contextObj The context where bundles belong to. //! @param paths Input paths. //! @param pathCount Length of paths array. //! @param bundles Output instances of IBundle2 interface. //! //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
OMNI_ATTR("no_py") virtual omni::core::Result getBundlesFromPaths_abi( GraphContextObj const* const contextObj OMNI_ATTR("in, not_null"), omni::fabric::PathC const* const paths OMNI_ATTR("in, not_null"), size_t pathCount, IBundle2** const bundles OMNI_ATTR("out, *not_null, count=pathCount")) noexcept = 0; }; } } } #include "IBundleFactory2.gen.h" //! @cond Doxygen_Suppress //! //! API part of the bundle factory interface OMNI_DEFINE_INTERFACE_API(omni::graph::core::IBundleFactory2) //! @endcond { public: //! Get read only IBundle interface from path. //! //! @param contextObj The context where bundles belong to. //! @param path Input path. //! //! @return Instance of IConstBundle interface. omni::core::ObjectPtr<omni::graph::core::IConstBundle2> getConstBundleFromPath( omni::graph::core::GraphContextObj const& contextObj, omni::fabric::PathC path) noexcept { omni::core::ObjectPtr<omni::graph::core::IConstBundle2> out; auto const outPtr = reinterpret_cast<omni::graph::core::IConstBundle2**>(&out); auto const result = getConstBundlesFromPaths_abi(&contextObj, &path, 1, outPtr); if (OMNI_FAILED(result)) { OMNI_LOG_ERROR("unable to get bundle at path: 0x%08X", result); } return out; } //! Get read write IBundle interface from path. //! //! @param contextObj The context where bundles belong to. //! @param path Input path. //! //! @return Instance of IBundle interface. omni::core::ObjectPtr<omni::graph::core::IBundle2> getBundleFromPath( omni::graph::core::GraphContextObj const& contextObj, omni::fabric::PathC path) noexcept { omni::core::ObjectPtr<omni::graph::core::IBundle2> out; auto const outPtr = reinterpret_cast<omni::graph::core::IBundle2**>(&out); auto const result = getBundlesFromPaths_abi(&contextObj, &path, 1, outPtr); if (OMNI_FAILED(result)) { OMNI_LOG_ERROR("unable to get bundle at path: 0x%08X", result); } return out; } //! Get read only IConstBundle2 interfaces from an array of paths. //! //! @param contextObj The context where bundles belong to. //! 
@param paths Input paths. //! @param pathCount Length of paths array. //! @param createdBundles Smart pointers that manage lifetime of IConstBundle2 instances. //! //! @return false on failure, true on success. bool getConstBundlesFromPaths( omni::graph::core::GraphContextObj const& contextObj, omni::fabric::PathC const* const paths, size_t const pathCount, omni::core::ObjectPtr<omni::graph::core::IConstBundle2>* createdBundles) noexcept { static_assert(sizeof(ObjectPtr<omni::graph::core::IConstBundle2>) == sizeof(omni::graph::core::IConstBundle2*), "ObjectPtr and IConstBundle2 pointer requires to be the same size!"); auto const result = getConstBundlesFromPaths_abi(&contextObj, paths, pathCount, reinterpret_cast<omni::graph::core::IConstBundle2**>(createdBundles)); if (OMNI_FAILED(result)) { OMNI_LOG_ERROR("unable to get const bundles from path: 0x%08X", result); return false; } return true; } };
omniverse-code/kit/include/omni/graph/core/bundle/PyIBundleFactory1.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindIBundleFactory(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::core::IBundleFactory_abi>, omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::IBundleFactory_abi>>, omni::core::IObject> clsParent(m, "_IBundleFactory"); py::class_<omni::graph::core::IBundleFactory, omni::core::Generated<omni::graph::core::IBundleFactory_abi>, omni::python::detail::PyObjectPtr<omni::graph::core::IBundleFactory>, omni::core::IObject> cls(m, "IBundleFactory", R"OMNI_BIND_RAW_(Interface to create new bundles)OMNI_BIND_RAW_"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::core::IBundleFactory>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::core::IBundleFactory>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::core::IBundleFactory instantiation"); } return tmp; })); return omni::python::PyBind<omni::graph::core::IBundleFactory>::bind(cls); }
omniverse-code/kit/include/omni/graph/core/bundle/IBundleChanges1.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/core/IObject.h> #include <omni/graph/core/Handle.h> namespace omni { namespace graph { namespace core { //! Enumeration representing the type of change that occurred in a bundle. //! //! This enumeration is used to identify the kind of modification that has taken place in a bundle or attribute. //! It's used as the return type for functions that check bundles and attributes, signaling whether those have been //! modified or not. enum class BundleChangeType : uint64_t { None = 0 << 0, //!< Indicates that no change has occurred in the bundle. Modified = 1 << 0, //!< Indicates that the bundle has been modified. }; OMNI_DECLARE_INTERFACE(IBundleChanges); //! Interface for monitoring and handling changes in bundles and attributes. //! //! The IBundleChanges_abi is an interface that provides methods for checking whether bundles and attributes //! have been modified, and cleaning them if they have been modified. This is particularly useful in scenarios //! where it's crucial to track changes and maintain the state of bundles and attributes. //! //! This interface provides several methods for checking and cleaning modifications, each catering to different //! use cases such as handling single bundles, multiple bundles, attributes, or specific attributes of a single bundle. //! //! The methods of this interface return a BundleChangeType enumeration that indicates whether the checked entity //! (bundle or attribute) has been modified. 
class IBundleChanges_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.IBundleChanges")> { protected: //! @brief Activate tracking for specific bundle on its attributes and children. //! @param handle to the specific bundles to enable change tracking. //! @return An omni::core::Result indicating the success of the operation. virtual omni::core::Result activateChangeTracking_abi(BundleHandle handle) noexcept = 0; //! @brief Deactivate tracking for specific bundle on its attributes and children. //! @param handle to the specific bundles to enable change tracking. //! @return An omni::core::Result indicating the success of the operation. virtual omni::core::Result deactivateChangeTracking_abi(BundleHandle handle) noexcept = 0; //! @brief Retrieves the change status of an array of bundles. //! //! This method is used to check if any of the bundles in the provided array have been modified. //! //! The count parameter indicates the size of the bundles array, as well as the changes output array. //! //! @param bundles An array of handles to the specific bundles to check for modifications. //! @param count The number of bundle handles in the array. //! @param changes An array that will be filled with BundleChangeType values for each bundle. //! //! @returns An omni::core::Result indicating the success of the operation. OMNI_ATTR("no_py") virtual omni::core::Result getChangesForBundles_abi( ConstBundleHandle const* const bundles OMNI_ATTR("in, throw_if_null, count=count"), size_t count, BundleChangeType* const changes OMNI_ATTR("out, throw_if_null, count=count")) noexcept = 0; //! @brief Retrieves the change status of an array of attributes. //! //! This method is used to check if any of the attributes in the provided array have been modified. //! //! The count parameter indicates the size of the attributes array, as well as the changes output array. //! //! 
@param attributes An array of handles to the attributes to check for modifications. //! @param count The number of attribute handles in the array. //! @param changes An array that will be filled with BundleChangeType values for each attribute. //! //! @returns An omni::core::Result indicating the success of the operation. OMNI_ATTR("no_py") virtual omni::core::Result getChangesForAttributes_abi( ConstAttributeDataHandle const* const attributes OMNI_ATTR("in, throw_if_null, count=count"), size_t count, BundleChangeType* const changes OMNI_ATTR("out, throw_if_null, count=count")) noexcept = 0; //! @brief Retrieves the change status of specific attributes within a bundle. //! //! This method is used to check if any of the specified attributes within a bundle have been modified. //! It operates by providing a handle to the bundle and an array of attribute names to check for modifications. //! //! The count parameter indicates the number of attribute names in the array, as well as the size of the changes //! output array. //! //! @param bundle A handle to the specific bundle for which attribute changes are being checked. //! @param attributes An array of attribute names (Tokens) to check for modifications. //! @param count The number of attribute names in the array. //! @param changes An array that will be filled with BundleChangeType values for each attribute. //! //! @returns An omni::core::Result indicating the success of the operation. OMNI_ATTR("no_py, no_api") virtual omni::core::Result getChangesForAttributesInBundle_abi( ConstBundleHandle const bundle, fabric::TokenC const* const attributes OMNI_ATTR("in, throw_if_null, count=count"), size_t count, BundleChangeType* const changes OMNI_ATTR("out, throw_if_null, count=count")) noexcept = 0; //! Clears all recorded changes. //! //! This method is used to clear or reset all the recorded changes of the bundles and attributes. //! It can be used when the changes have been processed and need to be discarded. //! //! 
An omni::core::Result indicating the success of the operation. virtual omni::core::Result clearChanges_abi() noexcept = 0; }; } } } #include "IBundleChanges1.gen.h" OMNI_DEFINE_INTERFACE_API(omni::graph::core::IBundleChanges) { public: using BundleChangeType = omni::graph::core::BundleChangeType; using ConstBundleHandle = omni::graph::core::ConstBundleHandle; using ConstAttributeDataHandle = omni::graph::core::ConstAttributeDataHandle; //! @brief Retrieves the change status of a specific bundle. //! //! This method is used to check if a specific bundle or its contents have been modified. //! //! @param bundle The handle to the specific bundle to check for modifications. //! //! @returns A BundleChangeType value indicating the type of change (if any) that has occurred to the bundle. BundleChangeType getChange(ConstBundleHandle bundle) { BundleChangeType change; auto const result = getChangesForBundles_abi(&bundle, 1, &change); return OMNI_SUCCEEDED(result) ? change : BundleChangeType::None; } //! @brief Retrieves the change status of a specific attribute. //! //! This method is used to check if a specific attribute has been modified. //! //! @param attribute The handle to the specific attribute to check for modifications. //! //! @returns A BundleChangeType value indicating the type of change (if any) that has occurred to the attribute. BundleChangeType getChange(ConstAttributeDataHandle attribute) { BundleChangeType change; auto const result = getChangesForAttributes_abi(&attribute, 1, &change); return OMNI_SUCCEEDED(result) ? change : BundleChangeType::None; } //! @brief Retrieves the change status of multiple bundles. //! //! This method is used to check if any of the provided bundles or their contents have been modified. //! //! @param bundles An array of handles to the bundles to check for modifications. //! @param size The number of bundle handles in the array. //! @param changes An array that will be filled with BundleChangeType values for each bundle. //! 
//! @returns An omni::core::Result indicating the success of the operation. omni::core::Result getChanges(ConstBundleHandle const* bundles, size_t size, BundleChangeType* changes) { return getChangesForBundles_abi(bundles, size, changes); } //! @brief Retrieves the change status of multiple attributes. //! //! This method is used to check if any of the provided attributes have been modified. //! //! @param attributes An array of handles to the attributes to check for modifications. //! @param size The number of attribute handles in the array. //! @param changes An array that will be filled with BundleChangeType values for each attribute. //! //! @returns An omni::core::Result indicating the success of the operation. omni::core::Result getChanges(ConstAttributeDataHandle const* attributes, size_t size, BundleChangeType* changes) { return getChangesForAttributes_abi(attributes, size, changes); } //! @brief Retrieves the change status of multiple attributes within a specific bundle. //! //! This method is used to check if any of the specified attributes within a particular bundle have been modified. //! //! @param bundle The handle to the specific bundle whose attributes are to be checked for modifications. //! @param attributes An array of Tokens representing the attributes to check for modifications. //! @param size The number of attribute tokens in the array. //! @param changes An array that will be filled with BundleChangeType values for each attribute. //! //! @returns An omni::core::Result indicating the success of the operation. omni::core::Result getChanges(ConstBundleHandle bundle, fabric::TokenC const* attributes, size_t size, BundleChangeType* changes) { return getChangesForAttributesInBundle_abi(bundle, attributes, size, changes); } };
omniverse-code/kit/include/omni/graph/core/bundle/PyIBundleChanges1.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once

#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>

#include <sstream>

// Registers the Python enum binding for omni::graph::core::BundleChangeType on module `m`.
// Generated by omni.bind -- do not hand-edit; regenerate instead.
auto bindBundleChangeType(py::module& m)
{
    py::enum_<omni::graph::core::BundleChangeType> e(
        m, "BundleChangeType",
        R"OMNI_BIND_RAW_(Enumeration representing the type of change that occurred in a bundle.

This enumeration is used to identify the kind of modification that has taken place in a bundle or attribute.
It's used as the return type for functions that check bundles and attributes, signaling whether those have been
modified or not.)OMNI_BIND_RAW_");
    e.value("NONE", omni::graph::core::BundleChangeType::None,
            R"OMNI_BIND_RAW_(Indicates that no change has occurred in the bundle.)OMNI_BIND_RAW_");
    e.value("MODIFIED", omni::graph::core::BundleChangeType::Modified,
            R"OMNI_BIND_RAW_(Indicates that the bundle has been modified.)OMNI_BIND_RAW_");
    return e;
}

// Registers the Python binding for omni::graph::core::IBundleChanges on module `m`.
// Generated by omni.bind -- do not hand-edit; regenerate instead.
// Returns the PyBind helper result for the bound class.
auto bindIBundleChanges(py::module& m)
{
    // hack around pybind11 issues with C++17
    //   - https://github.com/pybind/pybind11/issues/2234
    //   - https://github.com/pybind/pybind11/issues/2666
    //   - https://github.com/pybind/pybind11/issues/2856
    // The generated ABI wrapper is bound first (as a hidden "_IBundleChanges"
    // parent) so the concrete interface class below can inherit from it;
    // registration order matters to pybind11.
    py::class_<omni::core::Generated<omni::graph::core::IBundleChanges_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::IBundleChanges_abi>>,
               omni::core::IObject>
        clsParent(m, "_IBundleChanges");

    py::class_<omni::graph::core::IBundleChanges,
               omni::core::Generated<omni::graph::core::IBundleChanges_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::core::IBundleChanges>, omni::core::IObject>
        cls(m, "IBundleChanges", R"OMNI_BIND_RAW_(Interface for monitoring and handling changes in bundles and attributes.

The IBundleChanges_abi is an interface that provides methods for checking whether bundles and attributes
have been modified, and cleaning them if they have been modified. This is particularly useful in scenarios
where it's crucial to track changes and maintain the state of bundles and attributes.

This interface provides several methods for checking and cleaning modifications, each catering to different
use cases such as handling single bundles, multiple bundles, attributes, or specific attributes of a single bundle.

The methods of this interface return a BundleChangeType enumeration that indicates whether the checked entity
(bundle or attribute) has been modified.)OMNI_BIND_RAW_");

    // Constructor taking an existing IObject: down-casts to IBundleChanges,
    // raising in Python if the object does not implement the interface.
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::IBundleChanges>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));
    // Default constructor: asks the type factory for a new implementation
    // instance, raising if no implementation is registered.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::IBundleChanges>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::IBundleChanges instantiation");
            }
            return tmp;
        }));
    // Handle arguments are passed by pointer from Python and dereferenced here.
    cls.def("activate_change_tracking",
            [](omni::graph::core::IBundleChanges* self, omni::graph::core::BundleHandle* handle)
            {
                auto return_value = self->activateChangeTracking(*handle);
                return return_value;
            },
            R"OMNI_BIND_RAW_(@brief Activate tracking for specific bundle on its attributes and children.
@param handle to the specific bundles to enable change tracking.
@return An omni::core::Result indicating the success of the operation.)OMNI_BIND_RAW_",
            py::arg("handle"));
    cls.def("deactivate_change_tracking",
            [](omni::graph::core::IBundleChanges* self, omni::graph::core::BundleHandle* handle)
            {
                auto return_value = self->deactivateChangeTracking(*handle);
                return return_value;
            },
            R"OMNI_BIND_RAW_(@brief Deactivate tracking for specific bundle on its attributes and children.
@param handle to the specific bundles to enable change tracking.
@return An omni::core::Result indicating the success of the operation.)OMNI_BIND_RAW_",
            py::arg("handle"));
    cls.def("clear_changes", &omni::graph::core::IBundleChanges::clearChanges,
            R"OMNI_BIND_RAW_(Clears all recorded changes.

This method is used to clear or reset all the recorded changes of the bundles and attributes.
It can be used when the changes have been processed and need to be discarded.

An omni::core::Result indicating the success of the operation.)OMNI_BIND_RAW_");

    return omni::python::PyBind<omni::graph::core::IBundleChanges>::bind(cls);
}
omniverse-code/kit/include/omni/graph/core/bundle/IBundleFactory1.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Interface to create new bundles
//!
//! omni.bind generated wrapper: exposes the raw IBundleFactory_abi virtual
//! methods as conventional member functions that forward directly to the ABI
//! entry points (see the inline definitions at the bottom of this file).
template <>
class omni::core::Generated<omni::graph::core::IBundleFactory_abi> : public omni::graph::core::IBundleFactory_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::IBundleFactory")

    //! Create bundles at given paths and acquire instances of IBundle2 interface.
    //!
    //! @param contextObj The context where bundles are created.
    //! @param paths Locations for new bundles.
    //! @param pathCount Length of paths array.
    //! @param createdBundles Output instances of IBundle2 interface.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result createBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                     const omni::fabric::PathC* const paths,
                                     size_t pathCount,
                                     omni::graph::core::IBundle2** const createdBundles) noexcept;

    //! Acquire instances of IConstBundle2 interface from const bundle handles.
    //!
    //! @param contextObj The context where bundles belong to.
    //! @param bundleHandles The bundle handles.
    //! @param bundleCount Length of bundleHandles array.
    //! @param bundles Output instances of IConstBundle2 interface.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result getConstBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                       const omni::graph::core::ConstBundleHandle* const bundleHandles,
                                       size_t bundleCount,
                                       omni::graph::core::IConstBundle2** const bundles) noexcept;

    //! Acquire instances of IBundle2 interface from bundle handles.
    //!
    //! @param contextObj The context where bundles belong to.
    //! @param bundleHandles The bundle handles.
    //! @param bundleCount Length of bundleHandles array.
    //! @param bundles Output instances of IBundle2 interface.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result getBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                  const omni::graph::core::BundleHandle* const bundleHandles,
                                  size_t bundleCount,
                                  omni::graph::core::IBundle2** const bundles) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// The wrappers below are intentionally trivial pass-throughs to the *_abi
// methods; argument validation (if any) is the responsibility of the
// implementation behind the ABI.

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::createBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::fabric::PathC* const paths,
    size_t pathCount,
    omni::graph::core::IBundle2** const createdBundles) noexcept
{
    return createBundles_abi(contextObj, paths, pathCount, createdBundles);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::getConstBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::graph::core::ConstBundleHandle* const bundleHandles,
    size_t bundleCount,
    omni::graph::core::IConstBundle2** const bundles) noexcept
{
    return getConstBundles_abi(contextObj, bundleHandles, bundleCount, bundles);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::getBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::graph::core::BundleHandle* const bundleHandles,
    size_t bundleCount,
    omni::graph::core::IBundle2** const bundles) noexcept
{
    return getBundles_abi(contextObj, bundleHandles, bundleCount, bundles);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/bundle/IBundle1.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/Handle.h>

namespace omni
{
namespace graph
{
namespace core
{
// ======================================================================
/** Interface for bundle attribute data.
 *
 *  Carbonite ABI struct: this is a table of function pointers whose order and
 *  count are frozen (see STRUCT_INTEGRITY_CHECK at the bottom). New entries
 *  may only be appended, never inserted or reordered.
 */
struct IBundle
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::IBundle", 1, 6);

    /**
     * Counts the number of attributes contained in the bundle.
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] bundle The handle to the bundle data
     * @return the number of attributes in the bundle
     */
    size_t(CARB_ABI* getAttributesCount)(const GraphContextObj& contextObj, ConstBundleHandle bundle);

    /**
     * Get all of the attributes on the bundle in read-only form.
     *
     * @param[out] attrsOut Handles to the attributes on the bundle (preallocated to hold "count" members)
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] bundle The handle to the bundle data
     * @param[in] count The number of attribute slots allocated in attrsOut (should be >= getAttributesCount())
     */
    void(CARB_ABI* getAttributesR)(ConstAttributeDataHandle* attrsOut,
                                   const GraphContextObj& contextObj,
                                   ConstBundleHandle bundle,
                                   size_t count);

    /**
     * Get all of the attributes on the bundle in writable form.
     *
     * @param[out] attrsOut Handles to the attributes on the bundle (preallocated to hold "count" members)
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] bundle The handle to the bundle data
     * @param[in] count The number of attribute slots allocated in attrsOut (should be >= getAttributesCount())
     */
    void(CARB_ABI* getAttributesW)(AttributeDataHandle* attrsOut,
                                   const GraphContextObj& contextObj,
                                   BundleHandle bundle,
                                   size_t count);

    /**
     * Get attributes on the bundle whose names appear in "attrNames" in read-only form.
     *
     * @param[out] attrsOut Handles to the attributes on the bundle (preallocated to hold "count" members)
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] bundle The handle to the bundle data
     * @param[in] attrNames The list of attribute names to be looked up
     * @param[in] count The number of attribute slots allocated in attrsOut (should be >= getAttributesCount())
     */
    void(CARB_ABI* getAttributesByNameR)(ConstAttributeDataHandle* attrsOut,
                                         const GraphContextObj& contextObj,
                                         ConstBundleHandle bundle,
                                         const NameToken* attrNames,
                                         size_t count);

    /**
     * Get attributes on the bundle whose names appear in "attrNames" in writable form.
     *
     * @param[out] attrsOut Handles to the attributes on the bundle (preallocated to hold "count" members)
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] bundle The handle to the bundle data
     * @param[in] attrNames The list of attribute names to be looked up
     * @param[in] count The number of attribute slots allocated in attrsOut (should be >= getAttributesCount())
     */
    void(CARB_ABI* getAttributesByNameW)(AttributeDataHandle* attrsOut,
                                         const GraphContextObj& contextObj,
                                         BundleHandle bundle,
                                         const NameToken* attrNames,
                                         size_t count);

    /**
     * Get the names and types of all attributes on the bundle.
     *
     * @param[out] namesOut Handles to the names of attributes on the bundle (preallocated to hold "count" members)
     * @param[out] typesOut Handles to the types of attributes on the bundle (preallocated to hold "count" members)
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] bundle The handle to the bundle data
     * @param[in] count The number of attribute slots allocated in attrsOut (should be >= getAttributesCount())
     */
    void (CARB_ABI* getAttributeNamesAndTypes)(NameToken* namesOut,
                                               Type* typesOut,
                                               const GraphContextObj& contextObj,
                                               ConstBundleHandle bundle,
                                               size_t count);

    /**
     * Create a new attribute and add it to the bundle.
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] destination The handle to the bundle data
     * @param[in] attrName Name for the new attribute
     * @param[in] attrType Type for the new attribute
     * @return Handle to the newly created attribute
     */
    AttributeDataHandle(CARB_ABI* addAttribute)(const GraphContextObj& contextObj,
                                                BundleHandle destination,
                                                NameToken attrName,
                                                Type attrType);

    /**
     * Create a new attribute and add it to the bundle.
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] destination The handle to the bundle data
     * @param[in] pattern Attribute whose name and type is to be copied for the new attribute
     * @return Handle to the newly created attribute
     */
    AttributeDataHandle(CARB_ABI* addAttributeLike)(const GraphContextObj& contextObj,
                                                    BundleHandle destination,
                                                    ConstAttributeDataHandle pattern);

    /**
     * Create a new array attribute and add it to the bundle.
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] destination The handle to the bundle data
     * @param[in] attrName Name for the new attribute
     * @param[in] attrType Type for the new attribute
     * @param[in] elementCount Starting element count for the array attribute
     * @return Handle to the newly created attribute
     */
    AttributeDataHandle(CARB_ABI* addArrayAttribute)(const GraphContextObj& contextObj,
                                                     BundleHandle destination,
                                                     NameToken attrName,
                                                     Type attrType,
                                                     size_t elementCount);

    /**
     * Create a new array-of-arrays attribute and add it to the bundle.
     *
     * Note: At the moment only arrays and arrays-of-tuples are fully supported so use with caution
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] destination The handle to the bundle data
     * @param[in] attrName Name for the new attribute
     * @param[in] baseType Base data type for the new attribute
     * @param[in] numComponents Number of tuple elements for the new attribute (e.g. 3 for float[3])
     * @param[in] numArrays How many levels of arrays are on the attribute (currently only 1 is supported)
     * @param[in] ... Variadic list containing the starting element count for each level of array
     * @return Handle to the newly created attribute
     */
    AttributeDataHandle(CARB_ABI* addArrayOfArraysAttribute)(const GraphContextObj& contextObj,
                                                             BundleHandle destination,
                                                             NameToken attrName,
                                                             BaseDataType baseType,
                                                             size_t numComponents,
                                                             size_t numArrays,
                                                             ...);

    /**
     * Create a new attribute by copying an existing one, including its data, and renaming and add it to the bundle.
     *
     * Note: unlike addAttributeLike, no handle to the new attribute is returned.
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] destination The handle to the bundle data
     * @param[in] destinationAttrName Name for the new attribute
     * @param[in] source Handle to attribute whose data type is to be copied
     */
    void(CARB_ABI* copyAttribute)(const GraphContextObj& contextObj,
                                  BundleHandle destination,
                                  NameToken destinationAttrName,
                                  ConstAttributeDataHandle source);

    /**
     * Get the unique data ID of a bundle.
     *
     * @param[in] bundle The bundle whose ID is to be found
     * @return The unique ID of the bundle
     */
    uint64_t(CARB_ABI* getDataID)(ConstBundleHandle bundle);

    //! @cond Doxygen_Suppress
    //! Deprecated function - do not use
    [[deprecated("Use getBundlePath")]] const char*(CARB_ABI* getPrimPath)(ConstBundleHandle);
    //! @endcond

    /**
     * Remove an existing attribute from the bundle.
     *
     * NOTE(review): this member lacks the CARB_ABI calling-convention marker
     * used by every other entry in this table — presumably an oversight, but
     * changing it now would alter the frozen ABI; confirm before touching.
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] destination The handle to the bundle data
     * @param[in] attrName Name of the attribute to remove
     */
    void (*removeAttribute)(const GraphContextObj& contextObj, BundleHandle destination, NameToken attrName);

    /**
     * Add a batch of attributes to a bundle.
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] destination The handle to the bundle data
     * @param[in] attributeCount Number of attributes to be added
     * @param[in] attrNames Array of names for the new attributes
     * @param[in] attrTypes Array of types for the new attributes
     * @return Whether addition was successful
     */
    bool(CARB_ABI* addAttributes)(const GraphContextObj& contextObj,
                                  BundleHandle destination,
                                  size_t attributeCount,
                                  const NameToken* attrNames,
                                  const Type* attrTypes);

    /**
     * Remove a batch of attributes from a bundle.
     *
     * @param[in] contextObj The context to which the bundle belongs
     * @param[in] destination The handle to the bundle data
     * @param[in] attributeCount Number of attributes to be removed
     * @param[in] attrNames Array of names to be removed
     * @return Whether removal was successful
     */
    bool(CARB_ABI* removeAttributes)(const GraphContextObj& contextObj,
                                     BundleHandle destination,
                                     size_t attributeCount,
                                     const NameToken* attrNames);

    /**
     * Copy a set of attributes from a source bundle/node to another bundle/node.
     * Attributes that don't exist on the destination will be created.
     * The name on the destination can differ from the one on the source.
     *
     * @param[in] contextObj The context object used to find the variable data.
     * @param[in] destBundle The destination node/bundle on which to copy/create the attributes.
     * @param[in] sourceBundle The source node/bundle from which to read the data.
     * @param[in] srcNames The attributes to copy
     * @param[in] dstNames Optional - the name of the destination attribute.
     *                     if nullptr, the name of input will be used
     *                     if not nullptr, length must be equal to namesCount
     * @param[in] namesCount Length of srcNames array (and dstNames if provided)
     *
     */
    void(CARB_ABI* copyAttributes)(const GraphContextObj& contextObj,
                                   BundleHandle destBundle,
                                   ConstBundleHandle sourceBundle,
                                   NameToken const* srcNames,
                                   NameToken const* dstNames,
                                   size_t namesCount);

    /**
     * Retrieves the path to the bundle. It will be part of the node in which it is defined.
     *
     * @param[in] bundle A handle pointing to a bundle
     * @return the bundle path
     */
    const char*(CARB_ABI* getBundlePath)(ConstBundleHandle bundle);
};

// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IBundle, getBundlePath, 17)

}
}
}
omniverse-code/kit/include/omni/graph/core/bundle/IBundleFactory2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! IBundleFactory version 2.
//!
//! The version 2 allows to retrieve instances of IBundle instances from paths.
//!
//! omni.bind generated wrapper: exposes the raw IBundleFactory2_abi virtual
//! methods as conventional member functions that forward directly to the ABI
//! entry points (see the inline definitions at the bottom of this file).
template <>
class omni::core::Generated<omni::graph::core::IBundleFactory2_abi> : public omni::graph::core::IBundleFactory2_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::IBundleFactory2")

    //! Get read only IBundle interface from path.
    //!
    //! @param contextObj The context where bundles belong to.
    //! @param paths Input paths.
    //! @param pathCount Length of paths array.
    //! @param bundles Output instances of IConstBundle2 interface.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result getConstBundlesFromPaths(const omni::graph::core::GraphContextObj* const contextObj,
                                                const omni::fabric::PathC* const paths,
                                                size_t pathCount,
                                                omni::graph::core::IConstBundle2** const bundles) noexcept;

    //! Get read write IBundle interface from path.
    //!
    //! @param contextObj The context where bundles belong to.
    //! @param paths Input paths.
    //! @param pathCount Length of paths array.
    //! @param bundles Output instances of IBundle2 interface.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result getBundlesFromPaths(const omni::graph::core::GraphContextObj* const contextObj,
                                           const omni::fabric::PathC* const paths,
                                           size_t pathCount,
                                           omni::graph::core::IBundle2** const bundles) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Trivial pass-throughs to the *_abi methods; no extra logic lives here.

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory2_abi>::getConstBundlesFromPaths(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::fabric::PathC* const paths,
    size_t pathCount,
    omni::graph::core::IConstBundle2** const bundles) noexcept
{
    return getConstBundlesFromPaths_abi(contextObj, paths, pathCount, bundles);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory2_abi>::getBundlesFromPaths(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::fabric::PathC* const paths,
    size_t pathCount,
    omni::graph::core::IBundle2** const bundles) noexcept
{
    return getBundlesFromPaths_abi(contextObj, paths, pathCount, bundles);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/bundle/PyIConstBundle2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once

#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>

#include <sstream>

//! Register the Python binding for omni::graph::core::IConstBundle2 on module `m`.
//!
//! Binds a hidden parent class ("_IConstBundle2") for the generated ABI
//! wrapper, then the public "IConstBundle2" class with two constructors
//! (cast-from-IObject and default factory creation) and a read-only
//! `valid` property backed by isValid().
auto bindIConstBundle2(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    py::class_<omni::core::Generated<omni::graph::core::IConstBundle2_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::IConstBundle2_abi>>,
               omni::core::IObject>
        clsParent(m, "_IConstBundle2");

    py::class_<omni::graph::core::IConstBundle2, omni::core::Generated<omni::graph::core::IConstBundle2_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::core::IConstBundle2>, omni::core::IObject>
        cls(m, "IConstBundle2", R"OMNI_BIND_RAW_(Provide read only access to recursive bundles.)OMNI_BIND_RAW_");

    // Constructor from an existing IObject: performs a checked interface cast.
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::IConstBundle2>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));

    // Default constructor: instantiates the interface via the type factory.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::IConstBundle2>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::IConstBundle2 instantiation");
            }
            return tmp;
        }));

    cls.def_property_readonly("valid", &omni::graph::core::IConstBundle2::isValid);

    return omni::python::PyBind<omni::graph::core::IConstBundle2>::bind(cls);
}
omniverse-code/kit/include/omni/graph/core/bundle/IBundle2.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IBundle.h //! //! @brief Defines Read Write interfaces for recursive bundles. #pragma once #include "IConstBundle2.h" namespace omni { namespace graph { namespace core { //! Declare the IBundle2 interface definition OMNI_DECLARE_INTERFACE(IBundle2); //! Provide read write access to recursive bundles. class IBundle2_abi : public omni::core::Inherits<IConstBundle2, OMNI_TYPE_ID("omni.graph.core.IBundle2")> { protected: //! Return handle to this bundle. Invalid handle is returned if this bundle is invalid. OMNI_ATTR("no_py") virtual BundleHandle getHandle_abi() noexcept = 0; //! Return parent of this bundle, or invalid handle if there is no parent. OMNI_ATTR("no_py") virtual BundleHandle getParentBundle_abi() noexcept = 0; //! @brief Get read-write handles to all attributes in this bundle. //! //! @copydetails IConstBundle2_abi::getConstAttributes_abi OMNI_ATTR("no_py") virtual omni::core::Result getAttributes_abi( AttributeDataHandle* const attributes OMNI_ATTR("out, not_null, count=*attributeCount"), size_t* const attributeCount OMNI_ATTR("in, out, not_null")) noexcept = 0; //! @brief Searches for read-write handles of the attribute in this bundle by using attribute names. //! //! @copydetails IConstBundle2_abi::getConstAttributesByName_abi OMNI_ATTR("no_py") virtual omni::core::Result getAttributesByName_abi( NameToken const* const names OMNI_ATTR("in, not_null, count=nameCount"), size_t nameCount, AttributeDataHandle* const attributes OMNI_ATTR("out, not_null, count=nameCount")) noexcept = 0; //! 
@brief Get read write handles to all child bundles in this bundle. //! //! @copydetails IConstBundle2_abi::getConstChildBundles_abi OMNI_ATTR("no_py") virtual omni::core::Result getChildBundles_abi(BundleHandle* const bundles OMNI_ATTR("out, not_null, count=bundleCount"), size_t* const bundleCount OMNI_ATTR("in, out, not_null")) noexcept = 0; //! @brief Get read write handle to child bundle by index. //! //! @copydetails IConstBundle2_abi::getConstChildBundle_abi OMNI_ATTR("no_py") virtual omni::core::Result getChildBundle_abi(size_t bundleIndex, BundleHandle* const bundle OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Lookup for read write handles to child bundles under specified names. //! //! @copydetails IConstBundle2_abi::getConstChildBundlesByName_abi OMNI_ATTR("no_py") virtual omni::core::Result getChildBundlesByName_abi( NameToken const* const names OMNI_ATTR("in, not_null, count=nameCount"), size_t nameCount, BundleHandle* const foundBundles OMNI_ATTR("out, not_null, count=nameCount")) noexcept = 0; //! @brief Create new attributes by copying existing. //! //! Source attribute handles' data and metadata are copied. If a handle is invalid, //! then its source is ignored. //! Created attributes are owned by this bundle. //! //! @param newNames The names for the new attributes, if `nullptr` then names are taken from the source attributes. //! @param sourceAttributes Handles to attributes whose data type is to be copied. //! @param attributeCount Number of attributes to be copied. //! @param overwrite An option to overwrite existing attributes. //! @param copiedAttributes Output handles to the newly copied attributes. Can be `nullptr` if no output is //! required. //! @param copiedCount Number of successfully copied attributes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
OMNI_ATTR("no_py") virtual omni::core::Result copyAttributes_abi( NameToken const* const newNames OMNI_ATTR("in, count=attributeCount"), ConstAttributeDataHandle const* const sourceAttributes OMNI_ATTR("in, not_null, count=attributeCount"), size_t attributeCount, bool overwrite, AttributeDataHandle* const copiedAttributes OMNI_ATTR("out, count=attributeCount"), size_t* const copiedCount OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Create attributes based on provided names and types. //! //! Created attributes are owned by this bundle. //! //! @param names The names of the attributes. //! @param types The types of the attributes. //! @param elementCount Number of elements in the array, can be `nullptr` if attribute is not an array. //! @param attributeCount Number of attributes to be created. //! @param createdAttributes Output handles to the newly created attributes. Can be nullptr if no output is //! required. //! @param createdCount Number of successfully created attributes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result createAttributes_abi( NameToken const* const names OMNI_ATTR("in, not_null, count=attributeCount"), Type const* const types OMNI_ATTR("in, not_null, count=attributeCount"), size_t const* const elementCount OMNI_ATTR("in, not_null, count=attributeCount"), size_t attributeCount, AttributeDataHandle* const createdAttributes OMNI_ATTR("out, count=attributeCount"), size_t* const createdCount OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Use attribute handles as pattern to create new attributes. //! //! The name and type for new attributes are taken from pattern attributes, data and metadata is not copied. //! If pattern handle is invalid, then attribute creation is skipped. //! Created attributes are owned by this bundle. //! //! 
@param patternAttributes Attributes whose name and type is to be used to create new attributes. //! @param patternCount Number of attributes to be created. //! @param createdAttributes Output handles to the newly created attributes. Can be nullptr if no output is //! required. //! @param createdCount Number of successfully created attributes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result createAttributesLike_abi( ConstAttributeDataHandle const* const patternAttributes OMNI_ATTR("in, not_null, count=patternCount"), size_t patternCount, AttributeDataHandle* const createdAttributes OMNI_ATTR("out, count=patternCount"), size_t* const createdCount OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Create immediate child bundles under specified names in this bundle. //! //! Only immediate children are created. This method does not work recursively. //! If name token is invalid, then child bundle creation is skipped. //! Created bundles are owned by this bundle. //! //! @param names New children names in this bundle. //! @param nameCount Number of bundles to be created. //! @param createdBundles Output handles to the newly created bundles. Can be nullptr if no output is required. //! @param createdCount Number of successfully created child bundles. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result createChildBundles_abi( NameToken const* const names OMNI_ATTR("in, not_null, count=nameCount"), size_t nameCount, BundleHandle* const createdBundles OMNI_ATTR("out, count=nameCount"), size_t* const createdCount OMNI_ATTR("out, not_null")) noexcept = 0; //! <b>Feature not implemented yet.</b> //! //! @brief Add a set of attributes to this bundle as links. //! //! 
Added attributes are links to other attributes that are part of another bundle. //! If target handle is invalid, then linking is skipped. //! The links are owned by this bundle, but targets of the links are not. //! Removing links from this bundle does not destroy the data links point to. //! //! @param linkNames The names for new links. //! @param targetAttributes Handles to attributes whose data is to be added. //! @param attributeCount Number of attributes to be added. //! @param linkedAttributes Output handles to linked attributes. Can be nullptr if no output is required. //! @param linkedCount Number of attributes successfully linked. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result linkAttributes_abi( NameToken const* const linkNames OMNI_ATTR("in, count=attributeCount"), ConstAttributeDataHandle const* const targetAttributes OMNI_ATTR("in, not_null, count=attributeCount"), size_t attributeCount, AttributeDataHandle* const linkedAttributes OMNI_ATTR("out, count=attributeCount"), size_t* const linkedCount OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Copy bundle data and metadata from the source bundle to this bundle. //! //! If source handle is invalid, then operation is skipped. //! //! @param sourceBundle Handle to bundle whose data is to be copied. //! @param overwrite An option to overwrite existing content of the bundle. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result copyBundle_abi(ConstBundleHandle sourceBundle, bool overwrite) noexcept = 0; //! @brief Create new child bundles by copying existing. //! //! Source bundle handles' data and metadata are copied. If a handle is invalid, //! then its source is ignored. //! Created bundles are owned by this bundle. //! //! 
@param newNames Names for new children, if `nullptr` then names are taken from the source bundles. //! @param sourceBundles Handles to bundles whose data is to be copied. //! @param bundleCount Number of bundles to be copied. //! @param overwrite An option to overwrite existing child bundles. //! @param copiedBundles Output handles to the newly copied bundles. Can be `nullptr` if no output is required. //! @param copiedCount Number of successfully copied child bundles. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result copyChildBundles_abi( NameToken const* const newNames OMNI_ATTR("in, count=bundleCount"), ConstBundleHandle const* const sourceBundles OMNI_ATTR("in, not_null, count=bundleCount"), size_t bundleCount, bool overwrite, BundleHandle* const copiedBundles OMNI_ATTR("out, count=bundleCount"), size_t* const copiedCount OMNI_ATTR("out, not_null")) noexcept = 0; //! <b>Feature not implemented yet.</b> //! //! @brief Link content from the source bundle to this bundle. //! //! If source handle is invalid, then operation is skipped. //! //! @param sourceBundle Handle to bundle whose data is to be linked. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result linkBundle_abi( ConstBundleHandle const* const sourceBundle OMNI_ATTR("in, not_null")) noexcept = 0; //! @brief Add a set of bundles as children to this bundle as links. //! //! Created bundles are links to other bundles that are part of another bundle. //! If target handle is invalid, then operation is skipped. //! The links are owned by this bundle, but targets of the links are not. //! Removing links from this bundle does not destroy the targets data. //! //! @param linkNames Names for new links. //! @param targetBundles Handles to bundles whose data is to be added. //! 
@param bundleCount Number of bundles to be added. //! @param linkedBundles Handles to linked bundles. Can be nullptr if no output is required. //! @param linkedCount Number of child bundles successfully linked. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result linkChildBundles_abi( NameToken const* const linkNames OMNI_ATTR("in, count=bundleCount"), ConstBundleHandle const* const targetBundles OMNI_ATTR("in, not_null, count=bundleCount"), size_t bundleCount, BundleHandle* const linkedBundles OMNI_ATTR("out, count=bundleCount"), size_t* const linkedCount OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Remove attributes based on provided handles. //! //! Lookup the attribute handles and if they are part of this bundle then remove attributes' data and //! metadata. Attribute handles that are not part of this bundle are ignored. //! //! @param attributes Handles to attributes whose data is to be removed //! @param attributeCount Number of attributes to be removed. //! @param removedCount Number of attributes successfully removed. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result removeAttributes_abi(ConstAttributeDataHandle const* const attributes OMNI_ATTR("in, not_null, count=attributeCount"), size_t attributeCount, size_t* const removedCount OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Remove attributes based on provided names. //! //! Lookup the attribute names and if they are part of this bundle then remove attributes' data and //! metadata. Attribute names that are not part of this bundle are ignored. //! //! @param names The names of the attributes whose data is to be removed. //! @param nameCount Number of attributes to be removed. //! @param removedCount Number of attributes successfully removed. //! 
@return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result removeAttributesByName_abi( NameToken const* const names OMNI_ATTR("in, not_null, count=nameCount"), size_t nameCount, size_t* const removedCount OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Remove child bundles based on provided handles. //! //! Lookup the bundle handles and if they are children of the bundle then remove them and their metadata. //! Bundle handles that are not children of this bundle are ignored. //! Only empty child bundles can be removed. //! //! @param childHandles Handles to bundles to be removed. //! @param childCount Number of child bundles to be removed. //! @param removedCount Number of child bundles successfully removed. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. OMNI_ATTR("no_py") virtual omni::core::Result removeChildBundles_abi( ConstBundleHandle const* const childHandles OMNI_ATTR("in, not_null, count=bundleCount"), size_t childCount, size_t* const removedCount OMNI_ATTR("out, not_null")) noexcept = 0; //! @brief Remove child bundles based on provided names. //! //! Lookup the bundle names and if the are children of the bundle then remove them and their metadata. //! Bundle names that are not children of this bundle are ignored. //! Only empty child bundles can be removed. //! //! @param names The names of the child bundles to be removed. //! @param nameCount Number of child bundles to be removed. //! @param removedCount Number of child bundles successfully removed. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
OMNI_ATTR("no_py")
virtual omni::core::Result removeChildBundlesByName_abi(
    NameToken const* const names OMNI_ATTR("in, not_null, count=nameCount"),
    size_t nameCount,
    size_t* const removedCount OMNI_ATTR("out, not_null")) noexcept = 0;

//! @deprecated Metadata storage is deprecated and invalid handle is returned.
OMNI_ATTR("no_py")
virtual BundleHandle getMetadataStorage_abi() noexcept = 0;

//! @brief Search for bundle metadata fields based on provided names.
//!
//! Invalid attribute handles are returned for not existing names.
//!
//! @param fieldNames Bundle metadata field names to be searched for.
//! @param fieldCount Size of fieldNames and bundleMetadata arrays.
//! @param bundleMetadata Handles to bundle metadata fields in this bundle.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
OMNI_ATTR("no_py")
virtual omni::core::Result getBundleMetadataByName_abi(
    NameToken const* const fieldNames OMNI_ATTR("in, count=fieldCount"),
    size_t fieldCount,
    AttributeDataHandle* const bundleMetadata OMNI_ATTR("out, count=fieldCount")) noexcept = 0;

//! @brief Create bundle metadata fields in this bundle.
//!
//! @param fieldNames Names of new bundle metadata fields.
//! @param fieldTypes Types of new bundle metadata fields.
//! @param elementCount Number of elements in the array, can be `nullptr` if field is not an array.
//! @param fieldCount Size of fieldNames and fieldTypes arrays.
//! @param bundleMetadata Handles to the newly created bundle metadata fields. Can be `nullptr` if no output is
//! required.
//! @param createdCount Number of bundle metadata fields successfully created.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
OMNI_ATTR("no_py")
virtual omni::core::Result createBundleMetadata_abi(
    NameToken const* const fieldNames OMNI_ATTR("in, count=fieldCount"),
    Type const* const fieldTypes OMNI_ATTR("in, count=fieldCount"),
    size_t const* const elementCount OMNI_ATTR("in, count=fieldCount"),
    size_t fieldCount,
    AttributeDataHandle* const bundleMetadata OMNI_ATTR("out, count=fieldCount"),
    size_t* const createdCount OMNI_ATTR("out, not_null")) noexcept = 0;

//! @brief Remove bundle metadata based on provided field names.
//!
//! @param fieldNames Names of the bundle metadata fields whose data is to be removed.
//! @param fieldCount Number of the bundle metadata fields to be removed.
//! @param removedCount Number of bundle metadata fields successfully removed.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
OMNI_ATTR("no_py")
virtual omni::core::Result removeBundleMetadata_abi(
    NameToken const* const fieldNames OMNI_ATTR("in, not_null, count=fieldCount"),
    size_t fieldCount,
    size_t* const removedCount OMNI_ATTR("out, not_null")) noexcept = 0;

//! @brief Search for read-write field handles in the attribute by using field names.
//!
//! @copydetails IConstBundle2_abi::getConstAttributeMetadataByName_abi
OMNI_ATTR("no_py")
virtual omni::core::Result getAttributeMetadataByName_abi(
    NameToken attribute,
    NameToken const* const fieldNames OMNI_ATTR("in, count=fieldCount"),
    size_t fieldCount,
    AttributeDataHandle* const attributeMetadata OMNI_ATTR("out, count=fieldCount")) noexcept = 0;

//! @brief Create attribute metadata fields.
//!
//! @param attribute Name of the attribute.
//! @param fieldNames Names of new attribute metadata fields.
//! @param fieldTypes Types of new attribute metadata fields.
//! @param elementCount Number of elements in the array, can be `nullptr` if field is not an array.
//! @param fieldCount Size of fieldNames and fieldTypes arrays.
//! @param attributeMetadata Handles to the newly created attribute metadata. Can be `nullptr` if no output is
//! required.
//! @param createdCount Number of attribute metadata fields successfully created.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
OMNI_ATTR("no_py")
virtual omni::core::Result createAttributeMetadata_abi(
    NameToken attribute,
    NameToken const* const fieldNames OMNI_ATTR("in, count=fieldCount"),
    Type const* const fieldTypes OMNI_ATTR("in, count=fieldCount"),
    size_t const* const elementCount OMNI_ATTR("in, count=fieldCount"),
    size_t fieldCount,
    AttributeDataHandle* const attributeMetadata OMNI_ATTR("out, count=fieldCount"),
    size_t* const createdCount OMNI_ATTR("out, not_null")) noexcept = 0;

//! @brief Remove attribute metadata fields.
//!
//! @param attribute Name of the attribute.
//! @param fieldNames Names of the attribute metadata fields to be removed.
//! @param fieldCount Size of fieldNames array.
//! @param removedCount Number of attribute metadata fields successfully removed.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
OMNI_ATTR("no_py")
virtual omni::core::Result removeAttributeMetadata_abi(
    NameToken attribute,
    NameToken const* const fieldNames OMNI_ATTR("in, count=fieldCount"),
    size_t fieldCount,
    size_t* const removedCount OMNI_ATTR("out, not_null")) noexcept = 0;

//! @brief Remove all attributes, child bundles and metadata from this bundle, but keep the bundle itself.
//!
//! @param bundleMetadata Clears bundle metadata in this bundle.
//! @param attributes Clears attributes in this bundle.
//! @param childBundles Clears child bundles in this bundle.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
OMNI_ATTR("no_py") virtual omni::core::Result clearContents_abi(bool bundleMetadata, bool attributes, bool childBundles) noexcept = 0; }; } // namespace core } // namespace graph } // namespace omni #include "IBundle2.gen.h" //! @cond Doxygen_Suppress //! //! API part of the bundle factory interface //! @copydoc omni::graph::core::IBundle2_abi OMNI_DEFINE_INTERFACE_API(omni::graph::core::IBundle2) //! @endcond { public: //! @copydoc omni::graph::core::IBundle2::getAttributes_abi omni::core::Result getAttributes( omni::graph::core::AttributeDataHandle* const attributes, size_t attributeCount) noexcept { return getAttributes_abi(attributes, &attributeCount); } //! @brief Searches for read-write handle of the attribute in this bundle by using attribute name. //! //! @param name The name of the attribute to search for. //! @return Valid attribute handle if attribute if found, invalid handle otherwise. omni::graph::core::AttributeDataHandle getAttributeByName(omni::graph::core::NameToken name) noexcept { using namespace omni::graph::core; AttributeDataHandle out{ AttributeDataHandle::invalidValue() }; auto result = getAttributesByName_abi(&name, 1, &out); return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() }; } //! @copydoc omni::graph::core::IBundle2_abi::getChildBundles_abi omni::core::Result getChildBundles(omni::graph::core::BundleHandle* const bundles, size_t bundleCount) noexcept { return getChildBundles_abi(bundles, &bundleCount); } //! @brief Get the child bundle handle by index. //! //! If bundle index is out of range, then invalid handle is returned. //! //! @param bundleIndex Bundle index in range [0, childBundleCount). //! @return Bundle handle under the index. omni::graph::core::BundleHandle getChildBundle(size_t bundleIndex) noexcept { using namespace omni::graph::core; BundleHandle out{ BundleHandle::invalidValue() }; auto result = getChildBundle_abi(bundleIndex, &out); return OMNI_SUCCEEDED(result) ? 
out : BundleHandle{ BundleHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::copyAttributes_abi
    template <typename HANDLE>
    omni::core::Result copyAttributes(HANDLE const* const sourceAttributes,
                                      size_t attributeCount,
                                      bool overwrite = true,
                                      omni::graph::core::NameToken const* const newNames = nullptr,
                                      omni::graph::core::AttributeDataHandle* const copiedAttributes = nullptr,
                                      size_t* const copiedCount = nullptr) noexcept
    {
        using namespace omni::graph::core;
        static_assert(
            std::is_same<HANDLE, AttributeDataHandle>::value || std::is_same<HANDLE, ConstAttributeDataHandle>::value,
            "Only AttributeDataHandle and ConstAttributeDataHandle can be copied");
        // Both handle types are layout-compatible for the ABI call, hence the reinterpret_cast.
        return copyAttributes_abi(newNames, reinterpret_cast<ConstAttributeDataHandle const* const>(sourceAttributes),
                                  attributeCount, overwrite, copiedAttributes, copiedCount);
    }

    //! @brief Create new attribute by copying existing attribute's data and metadata.
    //!
    //! Created attribute is owned by this bundle.
    //!
    //! @param sourceAttribute Handle to attribute whose data type is to be copied.
    //! @param overwrite Overwrites existing attributes.
    //! @param newName The new name for copied attribute.
    //! @return Output handle to the newly copied attribute.
    omni::graph::core::AttributeDataHandle copyAttribute(
        omni::graph::core::ConstAttributeDataHandle const& sourceAttribute,
        bool overwrite = true,
        omni::graph::core::NameToken newName = omni::fabric::kUninitializedToken) noexcept
    {
        using namespace omni::graph::core;
        AttributeDataHandle out{ AttributeDataHandle::invalidValue() };
        // Uninitialized token means "keep the source attribute's name".
        NameToken* newNamePtr = newName == omni::fabric::kUninitializedToken ? nullptr : &newName;
        auto result = copyAttributes_abi(newNamePtr, &sourceAttribute, 1, overwrite, &out, nullptr);
        return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::createAttributes_abi
    omni::core::Result createAttributes(omni::graph::core::NameToken const* const names,
                                        omni::graph::core::Type const* const types,
                                        size_t attributeCount,
                                        size_t const* const elementCount = nullptr,
                                        omni::graph::core::AttributeDataHandle* const createdAttributes = nullptr,
                                        size_t* const createdCount = nullptr) noexcept
    {
        return createAttributes_abi(names, types, elementCount, attributeCount, createdAttributes, createdCount);
    }

    //! @brief Create attribute based on provided name and type.
    //!
    //! Created attribute is owned by this bundle.
    //!
    //! @param name The name of the attribute.
    //! @param type The type of the attribute.
    //! @param elementCount Number of elements in the array.
    //! @return Output handle to the newly created attribute.
    omni::graph::core::AttributeDataHandle createAttribute(
        omni::graph::core::NameToken name, omni::graph::core::Type const& type, size_t elementCount = 0) noexcept
    {
        using namespace omni::graph::core;
        omni::core::Result result = kResultFail;
        AttributeDataHandle out{ AttributeDataHandle::invalidValue() };
        // Element count is only meaningful for array types; pass nullptr otherwise.
        size_t* elementCountPtr = type.arrayDepth == 0 ? nullptr : &elementCount;
        result = createAttributes_abi(&name, &type, elementCountPtr, 1, &out, nullptr);
        return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::createAttributesLike_abi
    omni::core::Result createAttributesLike(omni::graph::core::ConstAttributeDataHandle const* const patternAttributes,
                                            size_t patternCount,
                                            omni::graph::core::AttributeDataHandle* const createdAttributes = nullptr,
                                            size_t* const createdCount = nullptr) noexcept
    {
        return createAttributesLike_abi(patternAttributes, patternCount, createdAttributes, createdCount);
    }

    //! @brief Use input attribute handle as pattern to create attribute in this bundle.
    //!
    //! The name and type are taken from pattern attribute, data is not copied.
    //! Created attribute is owned by this bundle.
    //!
    //! @param patternAttribute Attribute whose name and type is to be used to create new attribute.
    //! @return Output handle to the newly created attribute.
    omni::graph::core::AttributeDataHandle createAttributeLike(
        omni::graph::core::ConstAttributeDataHandle const& patternAttribute) noexcept
    {
        using namespace omni::graph::core;
        AttributeDataHandle out{ AttributeDataHandle::invalidValue() };
        auto result = createAttributesLike_abi(&patternAttribute, 1, &out, nullptr);
        return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::createChildBundles_abi
    omni::core::Result createChildBundles(omni::graph::core::NameToken const* const names,
                                          size_t nameCount,
                                          omni::graph::core::BundleHandle* const createdBundles = nullptr,
                                          size_t* const createdCount = nullptr) noexcept
    {
        return createChildBundles_abi(names, nameCount, createdBundles, createdCount);
    }

    //! @brief Create immediate child bundle under specified name.
    //!
    //! Created bundle is owned by this bundle. This method does not work recursively. Only immediate child can be
    //! created.
    //!
    //! @param name New child name in this bundle.
    //! @return Output handle to the newly created bundle.
    omni::graph::core::BundleHandle createChildBundle(omni::graph::core::NameToken name) noexcept
    {
        using namespace omni::graph::core;
        BundleHandle out{ BundleHandle::invalidValue() };
        auto result = createChildBundles_abi(&name, 1, &out, nullptr);
        return OMNI_SUCCEEDED(result) ? out : BundleHandle{ BundleHandle::invalidValue() };
    }

    //!
@copydoc omni::graph::core::IBundle2_abi::linkAttributes_abi template <typename HANDLE> omni::core::Result linkAttributes(HANDLE const* const targetAttributes, size_t attributeCount, omni::graph::core::NameToken const* const linkNames = nullptr, omni::graph::core::AttributeDataHandle* const linkedAttributes = nullptr, size_t* const linkedCount = nullptr) noexcept { using namespace omni::graph::core; static_assert( std::is_same<HANDLE, AttributeDataHandle>::value || std::is_same<HANDLE, ConstAttributeDataHandle>::value, "Only AttributeDataHandle and ConstAttributeDataHandle can be copied"); auto linkedAttributesPtr = reinterpret_cast<ConstAttributeDataHandle const* const>(targetAttributes); return linkAttributes_abi(linkNames, linkedAttributesPtr, attributeCount, linkedAttributes, linkedCount); } //! < b>Feature not implemented yet.< /b> //! //! @brief Add an attribute to this bundle as link with custom name. //! //! Added attribute is a link to other attribute that is part of another bundle. //! The link is owned by this bundle, but target of the link is not. //! Removing link from this bundle does not destroy the data link points to. //! //! @param linkName Name for new link. //! @param targetAttribute Handle to attribute whose data is to be added. //! @return Output handle to linked attributes. omni::graph::core::AttributeDataHandle linkAttribute( omni::graph::core::ConstAttributeDataHandle const& targetAttribute, omni::graph::core::NameToken linkName = omni::fabric::kUninitializedToken) noexcept { using namespace omni::graph::core; AttributeDataHandle out{ AttributeDataHandle::invalidValue() }; auto result = linkAttributes_abi(&linkName, &targetAttribute, 1, &out, nullptr); return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() }; } //! 
@copydoc omni::graph::core::IBundle2_abi::copyBundle_abi
    omni::core::Result copyBundle(omni::graph::core::ConstBundleHandle sourceBundle, bool overwrite = true) noexcept
    {
        return copyBundle_abi(sourceBundle, overwrite);
    }

    //! @copydoc omni::graph::core::IBundle2_abi::copyChildBundles_abi
    template <typename HANDLE>
    omni::core::Result copyChildBundles(HANDLE const* const sourceBundles,
                                        size_t bundleCount,
                                        bool overwrite = true,
                                        omni::graph::core::NameToken const* const newNames = nullptr,
                                        omni::graph::core::BundleHandle* const copiedBundles = nullptr,
                                        size_t* const copiedCount = nullptr) noexcept
    {
        using namespace omni::graph::core;
        static_assert(std::is_same<HANDLE, BundleHandle>::value || std::is_same<HANDLE, ConstBundleHandle>::value,
                      "Only BundleHandle and ConstBundleHandle can be copied");
        auto sourceBundlesPtr = reinterpret_cast<ConstBundleHandle const* const>(sourceBundles);
        return copyChildBundles_abi(newNames, sourceBundlesPtr, bundleCount, overwrite, copiedBundles, copiedCount);
    }

    //! @brief Create new child bundle by copying existing bundle's data and metadata, with possibility of giving child
    //! a new name.
    //!
    //! Created bundle is owned by this bundle.
    //!
    //! @param sourceBundle Handle to bundle whose data is to be copied.
    //! @param overwrite An option to overwrite child bundle.
    //! @param newName Name for new child.
    //! @return Output handles to the newly copied bundle.
    omni::graph::core::BundleHandle copyChildBundle(
        omni::graph::core::ConstBundleHandle const& sourceBundle,
        bool overwrite = true,
        omni::graph::core::NameToken newName = omni::fabric::kUninitializedToken) noexcept
    {
        using namespace omni::graph::core;
        BundleHandle out{ BundleHandle::invalidValue() };
        // Uninitialized token means "keep the source bundle's name".
        NameToken* newNamePtr = newName == omni::fabric::kUninitializedToken ? nullptr : &newName;
        auto result = copyChildBundles_abi(newNamePtr, &sourceBundle, 1, overwrite, &out, nullptr);
        return OMNI_SUCCEEDED(result) ? out : BundleHandle{ BundleHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::removeAttributes_abi
    template <typename HANDLE>
    omni::core::Result removeAttributes(
        HANDLE const* const attributes, size_t attributeCount, size_t* const removedCount = nullptr) noexcept
    {
        using namespace omni::graph::core;
        // NOTE(review): the message below says "copied"; presumably reused from copyAttributes —
        // wording only, the check itself is correct for removal.
        static_assert(
            std::is_same<HANDLE, AttributeDataHandle>::value || std::is_same<HANDLE, ConstAttributeDataHandle>::value,
            "Only AttributeDataHandle and ConstAttributeDataHandle can be copied");
        auto attributesPtr = reinterpret_cast<ConstAttributeDataHandle const* const>(attributes);
        return removeAttributes_abi(attributesPtr, attributeCount, removedCount);
    }

    //! @brief Lookup the attribute handle and if it is part of this bundle then remove attributes' data and
    //! metadata.
    //!
    //! Attribute handle that is not part of this bundle is ignored.
    //!
    //! @param attribute Handle to attribute whose data is to be removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeAttribute(omni::graph::core::ConstAttributeDataHandle const& attribute) noexcept
    {
        return removeAttributes_abi(&attribute, 1, nullptr);
    }

    //! @copydoc omni::graph::core::IBundle2_abi::removeAttributesByName_abi
    omni::core::Result removeAttributesByName(omni::graph::core::NameToken const* const names,
                                              size_t nameCount,
                                              size_t* const removedCount = nullptr) noexcept
    {
        return removeAttributesByName_abi(names, nameCount, removedCount);
    }

    //! @brief Lookup the attribute by name and remove its data and metadata.
    //!
    //! @param name The name of the attribute whose data is to be removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeAttributeByName(omni::graph::core::NameToken name) noexcept
    {
        return removeAttributesByName_abi(&name, 1, nullptr);
    }

    //! @copydoc omni::graph::core::IBundle2_abi::removeChildBundles_abi
    template <typename HANDLE>
    omni::core::Result removeChildBundles(
        HANDLE const* const bundles, size_t bundleCount, size_t* const removedCount = nullptr) noexcept
    {
        using namespace omni::graph::core;
        static_assert(std::is_same<HANDLE, BundleHandle>::value || std::is_same<HANDLE, ConstBundleHandle>::value,
                      "Only BundleHandle and ConstBundleHandle can be copied");
        auto bundlesPtr = reinterpret_cast<ConstBundleHandle const* const>(bundles);
        return removeChildBundles_abi(bundlesPtr, bundleCount, removedCount);
    }

    //! @brief Lookup the bundle handle and if it is child of the bundle then remove it and its metadata.
    //!
    //! Bundle handle that is not child of this bundle is ignored. Only empty bundle can be removed.
    //!
    //! @param bundle Handle to bundle to be removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeChildBundle(omni::graph::core::ConstBundleHandle const& bundle) noexcept
    {
        return removeChildBundles_abi(&bundle, 1, nullptr);
    }

    //! @copydoc omni::graph::core::IBundle2_abi::removeChildBundlesByName_abi
    omni::core::Result removeChildBundlesByName(omni::graph::core::NameToken const* const names,
                                                size_t nameCount,
                                                size_t* const removedCount = nullptr) noexcept
    {
        return removeChildBundlesByName_abi(names, nameCount, removedCount);
    }

    //! @brief Lookup child bundle by name and remove its data and metadata.
    //!
    //! @param name The name of the child bundle to be removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeChildBundleByName(omni::graph::core::NameToken name) noexcept
    {
        return removeChildBundlesByName_abi(&name, 1, nullptr);
    }

    //! @brief Lookup for child under specified name.
    //!
    //! For child that is not found invalid handle is returned.
    //!
    //!
@param name The name of bundle child in this bundle.
    //! @return Output handle to the found bundle.
    omni::graph::core::BundleHandle getChildBundleByName(omni::graph::core::NameToken name) noexcept
    {
        using namespace omni::graph::core;
        BundleHandle out{ BundleHandle::invalidValue() };
        auto result = getChildBundlesByName_abi(&name, 1, &out);
        return OMNI_SUCCEEDED(result) ? out : BundleHandle{ BundleHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::getBundleMetadataByName_abi
    using omni::core::Generated<omni::graph::core::IBundle2_abi>::getBundleMetadataByName;

    //! @brief Search for bundle metadata field based on provided name.
    //!
    //! Invalid attribute handle is returned for not existing name.
    //!
    //! @param fieldName Bundle metadata field name to be searched for.
    //!
    //! @return Valid metadata attribute handle for existing name, or Invalid handle for not existing.
    omni::graph::core::AttributeDataHandle getBundleMetadataByName(omni::graph::core::NameToken fieldName) noexcept
    {
        using namespace omni::graph::core;
        AttributeDataHandle out{ AttributeDataHandle::invalidValue() };
        auto result = getBundleMetadataByName_abi(&fieldName, 1, &out);
        return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::createBundleMetadata_abi
    omni::core::Result createBundleMetadata(omni::graph::core::NameToken const* const fieldNames,
                                            omni::graph::core::Type const* const fieldTypes,
                                            size_t fieldCount,
                                            size_t const* const elementCount = nullptr,
                                            omni::graph::core::AttributeDataHandle* const bundleMetadata = nullptr,
                                            size_t* const createdCount = nullptr) noexcept
    {
        return createBundleMetadata_abi(fieldNames, fieldTypes, elementCount, fieldCount, bundleMetadata, createdCount);
    }

    //! @brief Create bundle metadata field in this bundle.
    //!
    //! @param fieldName Name of new bundle metadata field.
    //! @param fieldType Type of new bundle metadata field.
    //! @param elementCount Number of elements in the array, if fieldType is an array type.
    //!
    //! @return Valid metadata attribute handle for existing name, or Invalid handle for not existing.
    omni::graph::core::AttributeDataHandle createBundleMetadata(
        omni::graph::core::NameToken fieldName, omni::graph::core::Type const& fieldType, size_t elementCount = 0) noexcept
    {
        using namespace omni::graph::core;
        AttributeDataHandle out{ AttributeDataHandle::invalidValue() };
        auto result = createBundleMetadata_abi(&fieldName, &fieldType, &elementCount, 1, &out, nullptr);
        return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::removeBundleMetadata_abi
    omni::core::Result removeBundleMetadata(omni::graph::core::NameToken const* const fieldNames,
                                            size_t fieldCount,
                                            size_t* const removedCount = nullptr) noexcept
    {
        return removeBundleMetadata_abi(fieldNames, fieldCount, removedCount);
    }

    //! @brief Remove bundle metadata based on provided field name.
    //!
    //! @param fieldName Name of the bundle metadata field whose data is to be removed.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeBundleMetadata(omni::graph::core::NameToken fieldName) noexcept
    {
        return removeBundleMetadata_abi(&fieldName, 1, nullptr);
    }

    //! @copydoc omni::graph::core::IBundle2_abi::getAttributeMetadataByName_abi
    using omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributeMetadataByName;

    //! @brief Search for read-write metadata field handle for the attribute by using field name.
    //!
    //! @param attribute The name of the attribute.
    //! @param fieldName The name of attribute metadata field to be searched for.
    //!
    //! @return Valid metadata attribute handle for existing name, or Invalid handle for not existing.
    omni::graph::core::AttributeDataHandle getAttributeMetadataByName(
        omni::graph::core::NameToken attribute, omni::graph::core::NameToken fieldName) noexcept
    {
        using namespace omni::graph::core;
        AttributeDataHandle out{ AttributeDataHandle::invalidValue() };
        auto result = getAttributeMetadataByName_abi(attribute, &fieldName, 1, &out);
        return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::createAttributeMetadata_abi
    omni::core::Result createAttributeMetadata(
        omni::graph::core::NameToken attribute,
        omni::graph::core::NameToken const* const fieldNames,
        omni::graph::core::Type const* const fieldTypes,
        size_t fieldCount,
        size_t const* const elementCount = nullptr,
        omni::graph::core::AttributeDataHandle* const attributeMetadata = nullptr,
        size_t* const createdCount = nullptr) noexcept
    {
        return createAttributeMetadata_abi(
            attribute, fieldNames, fieldTypes, elementCount, fieldCount, attributeMetadata, createdCount);
    }

    //! @brief Create attribute metadata field.
    //!
    //! @param attribute Name of the attribute.
    //! @param fieldName Name of new attribute metadata field.
    //! @param fieldType Type of new attribute metadata field.
    //! @param elementCount Number of elements in the array.
    //! @return Handle to the newly created attribute metadata field.
    omni::graph::core::AttributeDataHandle createAttributeMetadata(
        omni::graph::core::NameToken attribute,
        omni::graph::core::NameToken fieldName,
        omni::graph::core::Type const& fieldType,
        size_t elementCount = 0) noexcept
    {
        using namespace omni::graph::core;
        AttributeDataHandle out{ AttributeDataHandle::invalidValue() };
        // Element count is only meaningful for array types; pass nullptr otherwise.
        size_t* elementCountPtr = fieldType.arrayDepth == 0 ? nullptr : &elementCount;
        auto result = createAttributeMetadata_abi(attribute, &fieldName, &fieldType, elementCountPtr, 1, &out, nullptr);
        return OMNI_SUCCEEDED(result) ? out : AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IBundle2_abi::removeAttributeMetadata_abi
    omni::core::Result removeAttributeMetadata(omni::graph::core::NameToken attribute,
                                               omni::graph::core::NameToken const* const fieldNames,
                                               size_t fieldCount,
                                               size_t* const removedCount = nullptr) noexcept
    {
        return removeAttributeMetadata_abi(attribute, fieldNames, fieldCount, removedCount);
    }

    //! @copydoc omni::graph::core::IBundle2_abi::removeAttributeMetadata_abi
    using omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributeMetadata;

    //! @brief Remove attribute metadata field.
    //!
    //! @param attribute Name of the attribute.
    //! @param fieldName Name of the attribute metadata field to be removed.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeAttributeMetadata(
        omni::graph::core::NameToken attribute, omni::graph::core::NameToken fieldName) noexcept
    {
        return removeAttributeMetadata_abi(attribute, &fieldName, 1, nullptr);
    }

    //! @copydoc omni::graph::core::IBundle2_abi::clearContents_abi
    omni::core::Result clearContents(bool bundleMetadata = true, bool attributes = true, bool childBundles = true) noexcept
    {
        return clearContents_abi(bundleMetadata, attributes, childBundles);
    }
}; // IBundle2
omniverse-code/kit/include/omni/graph/core/bundle/IBundleFactory1.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/IBundle.h>

namespace omni
{
namespace graph
{
namespace core
{

OMNI_DECLARE_INTERFACE(IBundleFactory);

//! Interface to create new bundles
class IBundleFactory_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.IBundleFactory")>
{
protected:
    //! Create bundles at given paths and acquire instances of IBundle2 interface.
    //!
    //! @param contextObj The context where bundles are created.
    //! @param paths Locations for new bundles.
    //! @param pathCount Length of paths array.
    //! @param createdBundles Output instances of IBundle2 interface.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result createBundles_abi(GraphContextObj const* const contextObj OMNI_ATTR("in, not_null"),
                                                 omni::fabric::PathC const* const paths OMNI_ATTR("in, not_null"),
                                                 size_t pathCount,
                                                 IBundle2** const createdBundles
                                                     OMNI_ATTR("out, *not_null, count=pathCount")) noexcept = 0;

    //! Acquire instances of IConstBundle2 interface from const bundle handles.
    //!
    //! @param contextObj The context where bundles belong to.
    //! @param bundleHandles The bundle handles.
    //! @param bundleCount Length of bundleHandles array.
    //! @param bundles Output instances of IConstBundle2 interface.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getConstBundles_abi(
        GraphContextObj const* const contextObj OMNI_ATTR("in, not_null"),
        ConstBundleHandle const* const bundleHandles OMNI_ATTR("in, not_null"),
        size_t bundleCount,
        IConstBundle2** const bundles OMNI_ATTR("out, *not_null, count=bundleCount")) noexcept = 0;

    //! Acquire instances of IBundle2 interface from bundle handles.
    //!
    //! @param contextObj The context where bundles belong to.
    //! @param bundleHandles The bundle handles.
    //! @param bundleCount Length of bundleHandles array.
    //! @param bundles Output instances of IBundle2 interface.
    //!
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getBundles_abi(GraphContextObj const* const contextObj OMNI_ATTR("in, not_null"),
                                              BundleHandle const* const bundleHandles OMNI_ATTR("in, not_null"),
                                              size_t bundleCount,
                                              IBundle2** const bundles
                                                  OMNI_ATTR("out, *not_null, count=bundleCount")) noexcept = 0;
};

} // namespace core
} // namespace graph
} // namespace omni

#include "IBundleFactory1.gen.h"

//! @cond Doxygen_Suppress
//!
//! API part of the bundle factory interface
OMNI_DEFINE_INTERFACE_API(omni::graph::core::IBundleFactory)
//! @endcond
{
public:
    //! @brief Create a list of Bundle objects
    //!
    //! @param contextObj The context where bundles are created.
    //! @param paths Locations for new bundles.
    //! @param pathCount Length of paths array.
    //! @param createdBundles Output instances of IBundle2 interface.
    //!
    //! @return true if bundles were created
    //!
@return false if bundle creation failed or arguments were invalid bool createBundles(omni::graph::core::GraphContextObj const& contextObj, omni::fabric::PathC const* const paths, size_t pathCount, omni::core::ObjectPtr<omni::graph::core::IBundle2>* const createdBundles) noexcept { static_assert(sizeof(omni::core::ObjectPtr<omni::graph::core::IBundle2>) == sizeof(omni::graph::core::IBundle2*), "ObjectPtr and IBundle2 pointer requires to be the same size!"); auto result = createBundles_abi( &contextObj, paths, pathCount, reinterpret_cast<omni::graph::core::IBundle2**>(createdBundles)); if (OMNI_FAILED(result)) { OMNI_LOG_ERROR("unable to create bundles: 0x%08X", result); return false; } return true; } //! Create bundle at given path and acquire instance of IBundle2 interface. //! //! @param contextObj The context where bundle is created. //! @param path Locations for new bundle. //! //! @return Smart pointer that manages lifetime of IBundle2 instance. omni::core::ObjectPtr<omni::graph::core::IBundle2> createBundle(omni::graph::core::GraphContextObj const& contextObj, omni::fabric::PathC path) noexcept { omni::core::ObjectPtr<omni::graph::core::IBundle2> out; createBundles(contextObj, &path, 1, &out); return out; } //! Acquire instances of IConstBundle2 interface from constant bundle handles. //! //! @param contextObj The context where bundles belong to. //! @param bundleHandles The bundle handles. //! @param bundleCount Length of bundleHandles array. //! @param bundles Smart pointers that manage lifetime of IConstBundle2 instances. //! //! @return true if operation was successful, false otherwise. 
template <typename HANDLE> bool getConstBundles(omni::graph::core::GraphContextObj const& contextObj, HANDLE const* const bundleHandles, size_t bundleCount, omni::core::ObjectPtr<omni::graph::core::IConstBundle2>* const bundles) noexcept { static_assert(sizeof(omni::core::ObjectPtr<omni::graph::core::IConstBundle2>) == sizeof(omni::graph::core::IConstBundle2*), "ObjectPtr and IBundle2 pointer requires to be the same size!"); auto result = getConstBundles_abi( &contextObj, bundleHandles, bundleCount, reinterpret_cast<omni::graph::core::IConstBundle2**>(bundles)); if (OMNI_FAILED(result)) { OMNI_LOG_ERROR("unable to get bundles: 0x%08X", result); return false; } return true; } //! Acquire instance of IConstBundle2 interface from constant bundle handle. //! //! @param contextObj The context where bundles belong to. //! @param bundleHandle The bundle handle. //! //! @return Smart pointer that manages lifetime of IConstBundle2 instance. omni::core::ObjectPtr<omni::graph::core::IConstBundle2> getConstBundle( omni::graph::core::GraphContextObj const& contextObj, omni::graph::core::ConstBundleHandle const& bundleHandle) noexcept { omni::core::ObjectPtr<omni::graph::core::IConstBundle2> out; getConstBundles(contextObj, &bundleHandle, 1, &out); return out; } //! Acquire instances of IBundle2 interface from bundle handles. //! //! @param contextObj The context where bundles belong to. //! @param bundleHandles The bundle handles. //! @param bundleCount Length of bundleHandles array. //! @param createdBundles Smart pointers that manage lifetime of IBundle2 instances. //! //! @return true if operation was successful, false otherwise. 
bool getBundles(omni::graph::core::GraphContextObj const& contextObj, omni::graph::core::BundleHandle const* const bundleHandles, size_t bundleCount, omni::core::ObjectPtr<omni::graph::core::IBundle2>* const createdBundles) noexcept { static_assert(sizeof(omni::core::ObjectPtr<omni::graph::core::IBundle2>) == sizeof(omni::graph::core::IBundle2*), "ObjectPtr and IBundle2 pointer requires to be the same size!"); auto result = getBundles_abi( &contextObj, bundleHandles, bundleCount, reinterpret_cast<omni::graph::core::IBundle2**>(createdBundles)); if (OMNI_FAILED(result)) { OMNI_LOG_ERROR("unable to get bundles: 0x%08X", result); return false; } return true; } //! Acquire instance of IBundle2 interface from constant bundle handle. //! //! @param contextObj The context where bundles belong to. //! @param bundleHandle The bundle handle. //! //! @return Smart pointer that manages lifetime of IBundle2 instance. omni::core::ObjectPtr<omni::graph::core::IBundle2> getBundle(omni::graph::core::GraphContextObj const& contextObj, omni::graph::core::BundleHandle const& bundleHandle) noexcept { omni::core::ObjectPtr<omni::graph::core::IBundle2> out; getBundles(contextObj, &bundleHandle, 1, &out); return out; } };
omniverse-code/kit/include/omni/graph/core/bundle/IBundleChanges1.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Interface for monitoring and handling changes in bundles and attributes.
//!
//! The IBundleChanges_abi is an interface that provides methods for checking whether bundles and attributes
//! have been modified, and cleaning them if they have been modified. This is particularly useful in scenarios
//! where it's crucial to track changes and maintain the state of bundles and attributes.
//!
//! This interface provides several methods for checking and cleaning modifications, each catering to different
//! use cases such as handling single bundles, multiple bundles, attributes, or specific attributes of a single bundle.
//!
//! The methods of this interface return a BundleChangeType enumeration that indicates whether the checked entity
//! (bundle or attribute) has been modified.
//!
//! NOTE(review): the methods taking pointer arrays are intentionally not noexcept —
//! their generated implementations throw via OMNI_THROW_IF_ARG_NULL on null arguments.
template <>
class omni::core::Generated<omni::graph::core::IBundleChanges_abi> : public omni::graph::core::IBundleChanges_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::IBundleChanges")

    //! @brief Activate tracking for specific bundle on its attributes and children.
    //! @param handle Handle to the specific bundle to enable change tracking for.
    //! @return An omni::core::Result indicating the success of the operation.
    omni::core::Result activateChangeTracking(const omni::graph::core::BundleHandle& handle) noexcept;

    //! @brief Deactivate tracking for specific bundle on its attributes and children.
    //! @param handle Handle to the specific bundle to disable change tracking for.
    //! @return An omni::core::Result indicating the success of the operation.
    omni::core::Result deactivateChangeTracking(const omni::graph::core::BundleHandle& handle) noexcept;

    //! @brief Retrieves the change status of an array of bundles.
    //!
    //! This method is used to check if any of the bundles in the provided array have been modified.
    //!
    //! The count parameter indicates the size of the bundles array, as well as the changes output array.
    //!
    //! @param bundles An array of handles to the specific bundles to check for modifications.
    //! @param count The number of bundle handles in the array.
    //! @param changes An array that will be filled with BundleChangeType values for each bundle.
    //!
    //! @returns An omni::core::Result indicating the success of the operation.
    omni::core::Result getChangesForBundles(const omni::graph::core::ConstBundleHandle* const bundles,
                                            size_t count,
                                            omni::graph::core::BundleChangeType* const changes);

    //! @brief Retrieves the change status of an array of attributes.
    //!
    //! This method is used to check if any of the attributes in the provided array have been modified.
    //!
    //! The count parameter indicates the size of the attributes array, as well as the changes output array.
    //!
    //! @param attributes An array of handles to the attributes to check for modifications.
    //! @param count The number of attribute handles in the array.
    //! @param changes An array that will be filled with BundleChangeType values for each attribute.
    //!
    //! @returns An omni::core::Result indicating the success of the operation.
    omni::core::Result getChangesForAttributes(const omni::graph::core::ConstAttributeDataHandle* const attributes,
                                               size_t count,
                                               omni::graph::core::BundleChangeType* const changes);

    //! @brief Clears all recorded changes.
    //!
    //! This method is used to clear or reset all the recorded changes of the bundles and attributes.
    //! It can be used when the changes have been processed and need to be discarded.
    //!
    //! @returns An omni::core::Result indicating the success of the operation.
    omni::core::Result clearChanges() noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Thin forwarders to the ABI; null-pointer arguments are rejected with a thrown
// omni::core exception before the ABI call is made.
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleChanges_abi>::activateChangeTracking(
    const omni::graph::core::BundleHandle& handle) noexcept
{
    return activateChangeTracking_abi(handle);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleChanges_abi>::deactivateChangeTracking(
    const omni::graph::core::BundleHandle& handle) noexcept
{
    return deactivateChangeTracking_abi(handle);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleChanges_abi>::getChangesForBundles(
    const omni::graph::core::ConstBundleHandle* const bundles, size_t count, omni::graph::core::BundleChangeType* const changes)
{
    OMNI_THROW_IF_ARG_NULL(bundles);
    OMNI_THROW_IF_ARG_NULL(changes);
    auto return_ = getChangesForBundles_abi(bundles, count, changes);
    return return_;
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleChanges_abi>::getChangesForAttributes(
    const omni::graph::core::ConstAttributeDataHandle* const attributes,
    size_t count,
    omni::graph::core::BundleChangeType* const changes)
{
    OMNI_THROW_IF_ARG_NULL(attributes);
    OMNI_THROW_IF_ARG_NULL(changes);
    auto return_ = getChangesForAttributes_abi(attributes, count, changes);
    return return_;
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleChanges_abi>::clearChanges() noexcept
{
    return clearChanges_abi();
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/core/bundle/IConstBundle2.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IConstBundle2.h
//!
//! @brief Defines Read only interfaces for recursive bundles.
#pragma once

#include <omni/core/IObject.h>
#include <omni/graph/core/Handle.h>
#include <omni/graph/core/Type.h>
#include <omni/log/ILog.h>

OMNI_LOG_DECLARE_CHANNEL(kBundleChannel)

namespace omni
{
namespace graph
{
namespace core
{

//! Declare the interface definition
OMNI_DECLARE_INTERFACE(IConstBundle2);

//! Provide read only access to recursive bundles.
//!
//! Most query-style methods below follow a common two-call protocol:
//! call once with null output buffers to learn the required count ("query mode"),
//! then call again with allocated buffers to fetch the data ("get mode").
class IConstBundle2_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.IConstBundle2")>
{
protected:
    //! Return true if this bundle is valid, false otherwise.
    virtual bool isValid_abi() noexcept = 0;

    //! Return the context of this bundle.
    OMNI_ATTR("no_py")
    virtual GraphContextObj getContext_abi() noexcept = 0;

    //! Return Handle to this bundle. Invalid handle is returned if this bundle is invalid.
    OMNI_ATTR("no_py")
    virtual ConstBundleHandle getConstHandle_abi() noexcept = 0;

    //! Return full path of this bundle.
    OMNI_ATTR("no_py")
    virtual omni::fabric::PathC getPath_abi() noexcept = 0;

    //! Return name of this bundle
    OMNI_ATTR("no_py")
    virtual NameToken getName_abi() noexcept = 0;

    //! Return handle to the parent of this bundle. Invalid handle is returned if bundle has no parent.
    OMNI_ATTR("no_py")
    virtual ConstBundleHandle getConstParentBundle_abi() noexcept = 0;

    //! @brief Get the names and types of all attributes in this bundle.
    //!
    //! This method operates in two modes: **query mode** or **get mode**.
    //!
    //! **Query mode** is enabled when names and types are `nullptr`. When in this mode, *nameAndTypeCount
    //! will be populated with the number of attributes in the bundle.
    //!
    //! **Get mode** is enabled when names or types is not `nullptr`. Upon entering the function, *nameAndTypeCount
    //! stores the number of entries in names and types. In **Get mode** names are not nullptr, names array is populated
    //! with attribute names. In **Get mode** types are not nullptr, types array is populated with attribute types.
    //!
    //! @param names The names of the attributes.
    //! @param types The types of the attributes.
    //! @param nameAndTypeCount must not be `nullptr` in both modes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getAttributeNamesAndTypes_abi(
        NameToken* const names OMNI_ATTR("out, count=*nameAndTypeCount"),
        Type* const types OMNI_ATTR("out, count=*nameAndTypeCount"),
        size_t* const nameAndTypeCount OMNI_ATTR("in, out, not_null")) noexcept = 0;

    //! @brief Get read only handles to all attributes in this bundle.
    //!
    //! This method operates in two modes: **query mode** or **get mode**.
    //!
    //! **Query mode** is enabled when attributes is `nullptr`. When in this mode, *attributeCount
    //! will be populated with the number of attributes in the bundle.
    //!
    //! **Get mode** is enabled when attributes is not `nullptr`. Upon entering the function, *attributeCount
    //! stores the number of entries in attributes.
    //! In **Get mode** attributes are not nullptr, attributes array is populated with attribute handles in the bundle.
    //!
    //! @param attributes The buffer to store handles of the attributes in this bundle.
    //! @param attributeCount Size of attributes buffer. Must not be `nullptr` in both modes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getConstAttributes_abi(
        ConstAttributeDataHandle* const attributes OMNI_ATTR("out, not_null, count=*attributeCount"),
        size_t* const attributeCount OMNI_ATTR("in, out, not_null")) noexcept = 0;

    //! @brief Search for read only handles of the attribute in this bundle by using attribute names.
    //!
    //! @param names The name of the attributes to be searched for.
    //! @param nameCount Size of names buffer.
    //! @param attributes The buffer to store handles of the attributes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getConstAttributesByName_abi(
        NameToken const* const names OMNI_ATTR("in, not_null, count=nameCount"),
        size_t nameCount,
        ConstAttributeDataHandle* const attributes OMNI_ATTR("out, not_null, count=nameCount")) noexcept = 0;

    //! @brief Get read only handles to all child bundles in this bundle.
    //!
    //! This method operates in two modes: **query mode** or **get mode**.
    //!
    //! **Query mode** is enabled when bundles is `nullptr`. When in this mode, *bundleCount
    //! will be populated with the number of bundles in the bundle.
    //!
    //! **Get mode** is enabled when bundles is not `nullptr`. Upon entering the function, *bundleCount
    //! stores the number of entries in bundles.
    //! In **Get mode** bundles are not nullptr, bundles array is populated with bundle handles in the bundle.
    //!
    //! @param bundles The buffer to save child bundle handles.
    //! @param bundleCount Size of the bundles buffer. Must not be `nullptr` in both modes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getConstChildBundles_abi(
        ConstBundleHandle* const bundles OMNI_ATTR("out, not_null, count=*bundleCount"),
        size_t* const bundleCount OMNI_ATTR("in, out, not_null")) noexcept = 0;

    //! @brief Get read only handle to child bundle by index.
    //!
    //! @param bundleIndex Bundle index in range [0, childBundleCount).
    //! @param bundle Handle under the index. If bundle index is out of range, then invalid handle is returned.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getConstChildBundle_abi(
        size_t bundleIndex, ConstBundleHandle* const bundle OMNI_ATTR("out, not_null")) noexcept = 0;

    //! @brief Lookup for read only handles to child bundles under specified names.
    //!
    //! For children that are not found invalid handles are returned.
    //!
    //! @param names The names of the child bundles in this bundle.
    //! @param nameCount The number of child bundles to be searched.
    //! @param foundBundles Output handles to the found bundles.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getConstChildBundlesByName_abi(
        NameToken const* const names OMNI_ATTR("in, not_null, count=nameCount"),
        size_t nameCount,
        ConstBundleHandle* const foundBundles OMNI_ATTR("out, not_null, count=nameCount")) noexcept = 0;

    //! @deprecated Metadata storage is deprecated and invalid handle is returned.
    OMNI_ATTR("no_py")
    virtual ConstBundleHandle getConstMetadataStorage_abi() noexcept = 0;

    //! @brief Get the names and types of all bundle metadata fields in this bundle.
    //!
    //! This method operates in two modes: **query mode** or **get mode**.
    //!
    //! **Query mode** is enabled when fieldNames and fieldTypes are `nullptr`. When in this mode, *fieldCount
    //! will be populated with the number of metadata fields in this bundle.
    //!
    //! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function,
    //! *fieldCount stores the number of entries in fieldNames and @p fieldTypes.
    //!
    //! In **Get mode** fieldNames are not `nullptr`, fieldNames array is populated with field names.
    //! In **Get mode** fieldTypes are not `nullptr`, fieldTypes array is populated with field types.
    //!
    //! @param fieldNames Output field names in this bundle.
    //! @param fieldTypes Output field types in this bundle.
    //! @param fieldCount must not be `nullptr` in both modes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getBundleMetadataNamesAndTypes_abi(
        NameToken* const fieldNames OMNI_ATTR("out, count=*fieldCount"),
        Type* const fieldTypes OMNI_ATTR("out, count=*fieldCount"),
        size_t* const fieldCount OMNI_ATTR("in, out, not_null")) noexcept = 0;

    //! @brief Search for field handles in this bundle by using field names.
    //!
    //! @param fieldNames Name of bundle metadata fields to be searched for.
    //! @param fieldCount Size of fieldNames and bundleMetadata arrays.
    //! @param bundleMetadata Handle to metadata fields in this bundle.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getConstBundleMetadataByName_abi(
        NameToken const* const fieldNames OMNI_ATTR("in, count=fieldCount"),
        size_t fieldCount,
        ConstAttributeDataHandle* const bundleMetadata OMNI_ATTR("out, count=fieldCount")) noexcept = 0;

    //! @brief Get the names and types of all attribute metadata fields in the attribute.
    //!
    //! This method operates in two modes: **query mode** or **get mode**.
    //!
    //! **Query mode** is enabled when fieldNames and @p fieldTypes are `nullptr`. When in this mode, *fieldCount
    //! will be populated with the number of metadata fields in the attribute.
    //!
    //! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function,
    //! *fieldCount stores the number of entries in fieldNames and fieldTypes.
    //!
    //! In **Get mode** fieldNames are not `nullptr`, fieldNames array is populated with field names.
    //! In **Get mode** fieldTypes are not `nullptr`, fieldTypes array is populated with field types.
    //!
    //! @param attribute Name of the attribute.
    //! @param fieldNames Output field names in the attribute.
    //! @param fieldTypes Output field types in the attribute.
    //! @param fieldCount must not be `nullptr` in both modes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getAttributeMetadataNamesAndTypes_abi(
        NameToken attribute,
        NameToken* const fieldNames OMNI_ATTR("out, count=*fieldCount"),
        Type* const fieldTypes OMNI_ATTR("out, count=*fieldCount"),
        size_t* const fieldCount OMNI_ATTR("in, out, not_null")) noexcept = 0;

    //! @brief Search for read only field handles in the attribute by using field names.
    //!
    //! @param attribute The name of the attribute.
    //! @param fieldNames The names of attribute metadata fields to be searched for.
    //! @param fieldCount Size of fieldNames and attributeMetadata arrays.
    //! @param attributeMetadata Handles to attribute metadata fields in the attribute.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    OMNI_ATTR("no_py")
    virtual omni::core::Result getConstAttributeMetadataByName_abi(
        NameToken attribute,
        NameToken const* const fieldNames OMNI_ATTR("in, count=fieldCount"),
        size_t fieldCount,
        ConstAttributeDataHandle* const attributeMetadata OMNI_ATTR("out, count=fieldCount")) noexcept = 0;
};

} // namespace core
} // namespace graph
} // namespace omni

#include "IConstBundle2.gen.h"

//! @cond Doxygen_Suppress
//!
//! API part of the const bundle interface
//! @copydoc omni::graph::core::IConstBundle2_abi
OMNI_DEFINE_INTERFACE_API(omni::graph::core::IConstBundle2)
//! @endcond
{
public:
    //! Return number of attributes in this bundle.
size_t getAttributeCount() noexcept
    {
        // Query mode: a null buffer asks the ABI only for the count.
        size_t attributeCount = 0;
        auto result = getConstAttributes_abi(nullptr, &attributeCount);
        return OMNI_SUCCEEDED(result) ? attributeCount : 0;
    }

    //! Return number of child bundles in this bundle.
    size_t getChildBundleCount() noexcept
    {
        // Query mode: a null buffer asks the ABI only for the count.
        size_t bundleCount = 0;
        auto result = getConstChildBundles_abi(nullptr, &bundleCount);
        return OMNI_SUCCEEDED(result) ? bundleCount : 0;
    }

    //! @brief Get the names of all attributes in this bundle.
    //!
    //! @param names Output array of attribute names. Size must be at least nameCount.
    //! @param nameCount Length of names array.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result getAttributeNames(omni::graph::core::NameToken* const names, size_t nameCount) noexcept
    {
        return getAttributeNamesAndTypes_abi(names, nullptr, &nameCount);
    }

    //! @brief Get the types of all attributes in this bundle.
    //!
    //! @param types Output array of attribute types. Size must be at least typeCount.
    //! @param typeCount Length of types array.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result getAttributeTypes(omni::graph::core::Type* const types, size_t typeCount) noexcept
    {
        return getAttributeNamesAndTypes_abi(nullptr, types, &typeCount);
    }

    //! @copydoc omni::graph::core::IConstBundle2_abi::getConstAttributes_abi
    omni::core::Result getConstAttributes(
        omni::graph::core::ConstAttributeDataHandle* const attributes, size_t attributeCount) noexcept
    {
        return getConstAttributes_abi(attributes, &attributeCount);
    }

    //! @brief Search for attribute handle in this bundle by using attribute name.
    //!
    //! @param name The name of the attribute to search for.
    //! @return Valid attribute handle if attribute if found, invalid handle otherwise.
    omni::graph::core::ConstAttributeDataHandle getConstAttributeByName(omni::graph::core::NameToken name) noexcept
    {
        using namespace omni::graph::core;
        ConstAttributeDataHandle out{ ConstAttributeDataHandle::invalidValue() };
        auto result = getConstAttributesByName_abi(&name, 1, &out);
        return OMNI_SUCCEEDED(result) ? out : ConstAttributeDataHandle{ ConstAttributeDataHandle::invalidValue() };
    }

    //! @copydoc omni::graph::core::IConstBundle2_abi::getConstChildBundles_abi
    omni::core::Result getConstChildBundles(omni::graph::core::ConstBundleHandle* const bundles, size_t bundleCount) noexcept
    {
        return getConstChildBundles_abi(bundles, &bundleCount);
    }

    //! @brief @copybrief omni::graph::core::IConstBundle2_abi::getConstChildBundle_abi
    //!
    //! @param bundleIndex Bundle index in range [0, childBundleCount).
    //! @return Valid bundle handle if child bundle is found, invalid handle otherwise.
    omni::graph::core::ConstBundleHandle getConstChildBundle(size_t bundleIndex) noexcept
    {
        using namespace omni::graph::core;
        ConstBundleHandle out{ ConstBundleHandle::invalidValue() };
        auto result = getConstChildBundle_abi(bundleIndex, &out);
        return OMNI_SUCCEEDED(result) ? out : ConstBundleHandle{ ConstBundleHandle::invalidValue() };
    }

    //! @brief @copybrief omni::graph::core::IConstBundle2_abi::getConstChildBundlesByName_abi
    //!
    //! @param name The name of the child bundle.
    //! @return Valid bundle handle if child bundle is found, invalid handle otherwise.
    omni::graph::core::ConstBundleHandle getConstChildBundleByName(omni::graph::core::NameToken name) noexcept
    {
        using namespace omni::graph::core;
        ConstBundleHandle out{ ConstBundleHandle::invalidValue() };
        auto result = getConstChildBundlesByName_abi(&name, 1, &out);
        return OMNI_SUCCEEDED(result) ? out : ConstBundleHandle{ ConstBundleHandle::invalidValue() };
    }

    //! Return number of metadata fields in this bundle.
    size_t getBundleMetadataCount() noexcept
    {
        // Query mode: null buffers ask the ABI only for the count.
        size_t bundleMetadataCount = 0;
        auto result = getBundleMetadataNamesAndTypes_abi(nullptr, nullptr, &bundleMetadataCount);
        return OMNI_SUCCEEDED(result) ? bundleMetadataCount : 0;
    }

    //! @copydoc omni::graph::core::IConstBundle2_abi::getBundleMetadataNamesAndTypes_abi
    omni::core::Result getBundleMetadataNamesAndTypes(omni::graph::core::NameToken* const fieldNames,
                                                      omni::graph::core::Type* const fieldTypes,
                                                      size_t fieldCount) noexcept
    {
        return getBundleMetadataNamesAndTypes_abi(fieldNames, fieldTypes, &fieldCount);
    }

    //! @copydoc omni::graph::core::IConstBundle2_abi::getConstBundleMetadataByName_abi
    // Keep the generated array overload visible next to the single-name convenience below.
    using omni::core::Generated<IConstBundle2_abi>::getConstBundleMetadataByName;

    //! @brief @copybrief omni::graph::core::IConstBundle2_abi::getConstBundleMetadataByName_abi
    //!
    //! @param fieldName The name of the bundle metadata field.
    //! @return Valid attribute handle if bundle metadata is found, invalid handle otherwise.
    omni::graph::core::ConstAttributeDataHandle getConstBundleMetadataByName(omni::graph::core::NameToken fieldName) noexcept
    {
        using namespace omni::graph::core;
        ConstAttributeDataHandle out{ ConstAttributeDataHandle::invalidValue() };
        Result result = getConstBundleMetadataByName_abi(&fieldName, 1, &out);
        return OMNI_SUCCEEDED(result) ? out : ConstAttributeDataHandle{ ConstAttributeDataHandle::invalidValue() };
    }

    //! @brief Return Number of metadata fields in the attribute.
    //!
    //! @param attribute The name of the attribute.
    size_t getAttributeMetadataCount(omni::graph::core::NameToken attribute) noexcept
    {
        // Query mode: null buffers ask the ABI only for the count.
        size_t attributeMetadataCount = 0;
        auto result = getAttributeMetadataNamesAndTypes_abi(attribute, nullptr, nullptr, &attributeMetadataCount);
        return OMNI_SUCCEEDED(result) ? attributeMetadataCount : 0;
    }

    //! @copydoc omni::graph::core::IConstBundle2_abi::getAttributeMetadataNamesAndTypes_abi
    omni::core::Result getAttributeMetadataNamesAndTypes(
        omni::graph::core::NameToken attribute,
        omni::graph::core::NameToken* const fieldNames,
        omni::graph::core::Type* const fieldTypes,
        size_t fieldCount) noexcept
    {
        return getAttributeMetadataNamesAndTypes_abi(attribute, fieldNames, fieldTypes, &fieldCount);
    }

    //! @copydoc omni::graph::core::IConstBundle2_abi::getConstAttributeMetadataByName_abi
    // Keep the generated array overload visible next to the single-name convenience below.
    using omni::core::Generated<IConstBundle2_abi>::getConstAttributeMetadataByName;

    //! @brief @copybrief omni::graph::core::IConstBundle2_abi::getConstAttributeMetadataByName_abi
    //!
    //! @param attribute The name of the attribute.
    //! @param fieldName The name of the field.
    //! @return Valid attribute handle if bundle metadata is found, invalid handle otherwise.
    omni::graph::core::ConstAttributeDataHandle getConstAttributeMetadataByName(
        omni::graph::core::NameToken attribute, omni::graph::core::NameToken fieldName) noexcept
    {
        using namespace omni::graph::core;
        ConstAttributeDataHandle out{ ConstAttributeDataHandle::invalidValue() };
        auto result = getConstAttributeMetadataByName_abi(attribute, &fieldName, 1, &out);
        return OMNI_SUCCEEDED(result) ? out : ConstAttributeDataHandle{ ConstAttributeDataHandle::invalidValue() };
    }
}; // IConstBundle2
omniverse-code/kit/include/omni/graph/core/bundle/PyIBundleFactory2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once

#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>

#include <sstream>

//! Register the IBundleFactory2 interface with the given Python module.
auto bindIBundleFactory2(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    // Hidden base binding for the generated layer; exposed under a leading underscore.
    py::class_<omni::core::Generated<omni::graph::core::IBundleFactory2_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::IBundleFactory2_abi>>,
               omni::core::Api<omni::graph::core::IBundleFactory_abi>>
        clsParent(m, "_IBundleFactory2");

    py::class_<omni::graph::core::IBundleFactory2, omni::core::Generated<omni::graph::core::IBundleFactory2_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::core::IBundleFactory2>,
               omni::core::Api<omni::graph::core::IBundleFactory_abi>>
        cls(m, "IBundleFactory2", R"OMNI_BIND_RAW_(IBundleFactory version 2.

The version 2 allows to retrieve instances of IBundle instances from paths.)OMNI_BIND_RAW_");

    // Constructor from an existing IObject: performs a checked interface cast.
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::IBundleFactory2>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));

    // Default constructor: instantiates the interface via the type factory.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::IBundleFactory2>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::IBundleFactory2 instantiation");
            }
            return tmp;
        }));

    return omni::python::PyBind<omni::graph::core::IBundleFactory2>::bind(cls);
}
omniverse-code/kit/include/omni/graph/core/bundle/PyIBundle2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindIBundle2(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::core::IBundle2_abi>, omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::IBundle2_abi>>, omni::core::Api<omni::graph::core::IConstBundle2_abi>> clsParent(m, "_IBundle2"); py::class_<omni::graph::core::IBundle2, omni::core::Generated<omni::graph::core::IBundle2_abi>, omni::python::detail::PyObjectPtr<omni::graph::core::IBundle2>, omni::core::Api<omni::graph::core::IConstBundle2_abi>> cls(m, "IBundle2", R"OMNI_BIND_RAW_(Provide read write access to recursive bundles.)OMNI_BIND_RAW_"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::core::IBundle2>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::core::IBundle2>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::core::IBundle2 instantiation"); } return tmp; })); return omni::python::PyBind<omni::graph::core::IBundle2>::bind(cls); }
omniverse-code/kit/include/omni/graph/core/bundle/IBundle2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

// NOTE(review): this header is included twice by the omni.bind machinery — once
// for the interface declaration (OMNI_BIND_INCLUDE_INTERFACE_IMPL undefined) and
// once for the inline trampoline definitions (OMNI_BIND_INCLUDE_INTERFACE_DECL
// undefined). Each public method simply forwards to its `*_abi` counterpart.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Provide read write access to recursive bundles.
template <>
class omni::core::Generated<omni::graph::core::IBundle2_abi> : public omni::graph::core::IBundle2_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::IBundle2")

    //! Return handle to this bundle. Invalid handle is returned if this bundle is invalid.
    omni::graph::core::BundleHandle getHandle() noexcept;

    //! Return parent of this bundle, or invalid handle if there is no parent.
    omni::graph::core::BundleHandle getParentBundle() noexcept;

    //! @brief Get read-write handles to all attributes in this bundle.
    //!
    //! @copydetails IConstBundle2_abi::getConstAttributes_abi
    omni::core::Result getAttributes(omni::graph::core::AttributeDataHandle* const attributes,
                                     size_t* const attributeCount) noexcept;

    //! @brief Searches for read-write handles of the attribute in this bundle by using attribute names.
    //!
    //! @copydetails IConstBundle2_abi::getConstAttributesByName_abi
    omni::core::Result getAttributesByName(const omni::graph::core::NameToken* const names,
                                           size_t nameCount,
                                           omni::graph::core::AttributeDataHandle* const attributes) noexcept;

    //! @brief Get read write handles to all child bundles in this bundle.
    //!
    //! @copydetails IConstBundle2_abi::getConstChildBundles_abi
    omni::core::Result getChildBundles(omni::graph::core::BundleHandle* const bundles,
                                       size_t* const bundleCount) noexcept;

    //! @brief Get read write handle to child bundle by index.
    //!
    //! @copydetails IConstBundle2_abi::getConstChildBundle_abi
    omni::core::Result getChildBundle(size_t bundleIndex, omni::graph::core::BundleHandle* const bundle) noexcept;

    //! @brief Lookup for read write handles to child bundles under specified names.
    //!
    //! @copydetails IConstBundle2_abi::getConstChildBundlesByName_abi
    omni::core::Result getChildBundlesByName(const omni::graph::core::NameToken* const names,
                                             size_t nameCount,
                                             omni::graph::core::BundleHandle* const foundBundles) noexcept;

    //! @brief Create new attributes by copying existing.
    //!
    //! Source attribute handles' data and metadata are copied. If a handle is invalid,
    //! then its source is ignored.
    //! Created attributes are owned by this bundle.
    //!
    //! @param newNames The names for the new attributes, if `nullptr` then names are taken from the source attributes.
    //! @param sourceAttributes Handles to attributes whose data type is to be copied.
    //! @param attributeCount Number of attributes to be copied.
    //! @param overwrite An option to overwrite existing attributes.
    //! @param copiedAttributes Output handles to the newly copied attributes. Can be `nullptr` if no output is
    //! required.
    //! @param copiedCount Number of successfully copied attributes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result copyAttributes(const omni::graph::core::NameToken* const newNames,
                                      const omni::graph::core::ConstAttributeDataHandle* const sourceAttributes,
                                      size_t attributeCount,
                                      bool overwrite,
                                      omni::graph::core::AttributeDataHandle* const copiedAttributes,
                                      size_t* const copiedCount) noexcept;

    //! @brief Create attributes based on provided names and types.
    //!
    //! Created attributes are owned by this bundle.
    //!
    //! @param names The names of the attributes.
    //! @param types The types of the attributes.
    //! @param elementCount Number of elements in the array, can be `nullptr` if attribute is not an array.
    //! @param attributeCount Number of attributes to be created.
    //! @param createdAttributes Output handles to the newly created attributes. Can be nullptr if no output is
    //! required.
    //! @param createdCount Number of successfully created attributes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result createAttributes(const omni::graph::core::NameToken* const names,
                                        const omni::graph::core::Type* const types,
                                        const size_t* const elementCount,
                                        size_t attributeCount,
                                        omni::graph::core::AttributeDataHandle* const createdAttributes,
                                        size_t* const createdCount) noexcept;

    //! @brief Use attribute handles as pattern to create new attributes.
    //!
    //! The name and type for new attributes are taken from pattern attributes, data and metadata is not copied.
    //! If pattern handle is invalid, then attribute creation is skipped.
    //! Created attributes are owned by this bundle.
    //!
    //! @param patternAttributes Attributes whose name and type is to be used to create new attributes.
    //! @param patternCount Number of attributes to be created.
    //! @param createdAttributes Output handles to the newly created attributes. Can be nullptr if no output is
    //! required.
    //! @param createdCount Number of successfully created attributes.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result createAttributesLike(const omni::graph::core::ConstAttributeDataHandle* const patternAttributes,
                                            size_t patternCount,
                                            omni::graph::core::AttributeDataHandle* const createdAttributes,
                                            size_t* const createdCount) noexcept;

    //! @brief Create immediate child bundles under specified names in this bundle.
    //!
    //! Only immediate children are created. This method does not work recursively.
    //! If name token is invalid, then child bundle creation is skipped.
    //! Created bundles are owned by this bundle.
    //!
    //! @param names New children names in this bundle.
    //! @param nameCount Number of bundles to be created.
    //! @param createdBundles Output handles to the newly created bundles. Can be nullptr if no output is required.
    //! @param createdCount Number of successfully created child bundles.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result createChildBundles(const omni::graph::core::NameToken* const names,
                                          size_t nameCount,
                                          omni::graph::core::BundleHandle* const createdBundles,
                                          size_t* const createdCount) noexcept;

    //! <b>Feature not implemented yet.</b>
    //!
    //! @brief Add a set of attributes to this bundle as links.
    //!
    //! Added attributes are links to other attributes that are part of another bundle.
    //! If target handle is invalid, then linking is skipped.
    //! The links are owned by this bundle, but targets of the links are not.
    //! Removing links from this bundle does not destroy the data links point to.
    //!
    //! @param linkNames The names for new links.
    //! @param targetAttributes Handles to attributes whose data is to be added.
    //! @param attributeCount Number of attributes to be added.
    //! @param linkedAttributes Output handles to linked attributes. Can be nullptr if no output is required.
    //! @param linkedCount Number of attributes successfully linked.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result linkAttributes(const omni::graph::core::NameToken* const linkNames,
                                      const omni::graph::core::ConstAttributeDataHandle* const targetAttributes,
                                      size_t attributeCount,
                                      omni::graph::core::AttributeDataHandle* const linkedAttributes,
                                      size_t* const linkedCount) noexcept;

    //! @brief Copy bundle data and metadata from the source bundle to this bundle.
    //!
    //! If source handle is invalid, then operation is skipped.
    //!
    //! @param sourceBundle Handle to bundle whose data is to be copied.
    //! @param overwrite An option to overwrite existing content of the bundle.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result copyBundle(const omni::graph::core::ConstBundleHandle& sourceBundle, bool overwrite) noexcept;

    //! @brief Create new child bundles by copying existing.
    //!
    //! Source bundle handles' data and metadata are copied. If a handle is invalid,
    //! then its source is ignored.
    //! Created bundles are owned by this bundle.
    //!
    //! @param newNames Names for new children, if `nullptr` then names are taken from the source bundles.
    //! @param sourceBundles Handles to bundles whose data is to be copied.
    //! @param bundleCount Number of bundles to be copied.
    //! @param overwrite An option to overwrite existing child bundles.
    //! @param copiedBundles Output handles to the newly copied bundles. Can be `nullptr` if no output is required.
    //! @param copiedCount Number of successfully copied child bundles.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result copyChildBundles(const omni::graph::core::NameToken* const newNames,
                                        const omni::graph::core::ConstBundleHandle* const sourceBundles,
                                        size_t bundleCount,
                                        bool overwrite,
                                        omni::graph::core::BundleHandle* const copiedBundles,
                                        size_t* const copiedCount) noexcept;

    //! <b>Feature not implemented yet.</b>
    //!
    //! @brief Link content from the source bundle to this bundle.
    //!
    //! If source handle is invalid, then operation is skipped.
    //!
    //! @param sourceBundle Handle to bundle whose data is to be linked.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result linkBundle(const omni::graph::core::ConstBundleHandle* const sourceBundle) noexcept;

    //! @brief Add a set of bundles as children to this bundle as links.
    //!
    //! Created bundles are links to other bundles that are part of another bundle.
    //! If target handle is invalid, then operation is skipped.
    //! The links are owned by this bundle, but targets of the links are not.
    //! Removing links from this bundle does not destroy the targets data.
    //!
    //! @param linkNames Names for new links.
    //! @param targetBundles Handles to bundles whose data is to be added.
    //! @param bundleCount Number of bundles to be added.
    //! @param linkedBundles Handles to linked bundles. Can be nullptr if no output is required.
    //! @param linkedCount Number of child bundles successfully linked.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result linkChildBundles(const omni::graph::core::NameToken* const linkNames,
                                        const omni::graph::core::ConstBundleHandle* const targetBundles,
                                        size_t bundleCount,
                                        omni::graph::core::BundleHandle* const linkedBundles,
                                        size_t* const linkedCount) noexcept;

    //! @brief Remove attributes based on provided handles.
    //!
    //! Lookup the attribute handles and if they are part of this bundle then remove attributes' data and
    //! metadata. Attribute handles that are not part of this bundle are ignored.
    //!
    //! @param attributes Handles to attributes whose data is to be removed
    //! @param attributeCount Number of attributes to be removed.
    //! @param removedCount Number of attributes successfully removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeAttributes(const omni::graph::core::ConstAttributeDataHandle* const attributes,
                                        size_t attributeCount,
                                        size_t* const removedCount) noexcept;

    //! @brief Remove attributes based on provided names.
    //!
    //! Lookup the attribute names and if they are part of this bundle then remove attributes' data and
    //! metadata. Attribute names that are not part of this bundle are ignored.
    //!
    //! @param names The names of the attributes whose data is to be removed.
    //! @param nameCount Number of attributes to be removed.
    //! @param removedCount Number of attributes successfully removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeAttributesByName(const omni::graph::core::NameToken* const names,
                                              size_t nameCount,
                                              size_t* const removedCount) noexcept;

    //! @brief Remove child bundles based on provided handles.
    //!
    //! Lookup the bundle handles and if they are children of the bundle then remove them and their metadata.
    //! Bundle handles that are not children of this bundle are ignored.
    //! Only empty child bundles can be removed.
    //!
    //! @param childHandles Handles to bundles to be removed.
    //! @param childCount Number of child bundles to be removed.
    //! @param removedCount Number of child bundles successfully removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeChildBundles(const omni::graph::core::ConstBundleHandle* const childHandles,
                                          size_t childCount,
                                          size_t* const removedCount) noexcept;

    //! @brief Remove child bundles based on provided names.
    //!
    //! Lookup the bundle names and if the are children of the bundle then remove them and their metadata.
    //! Bundle names that are not children of this bundle are ignored.
    //! Only empty child bundles can be removed.
    //!
    //! @param names The names of the child bundles to be removed.
    //! @param nameCount Number of child bundles to be removed.
    //! @param removedCount Number of child bundles successfully removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeChildBundlesByName(const omni::graph::core::NameToken* const names,
                                                size_t nameCount,
                                                size_t* const removedCount) noexcept;

    //! @deprecated Metadata storage is deprecated and invalid handle is returned.
    omni::graph::core::BundleHandle getMetadataStorage() noexcept;

    //! @brief Search for bundle metadata fields based on provided names.
    //!
    //! Invalid attribute handles are returned for not existing names.
    //!
    //! @param fieldNames Bundle metadata field names to be searched for.
    //! @param fieldCount Size of fieldNames and bundleMetadata arrays.
    //! @param bundleMetadata Handles to bundle metadata fields in this bundle.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result getBundleMetadataByName(const omni::graph::core::NameToken* const fieldNames,
                                               size_t fieldCount,
                                               omni::graph::core::AttributeDataHandle* const bundleMetadata) noexcept;

    //! @brief Create bundle metadata fields in this bundle.
    //!
    //! @param fieldNames Names of new bundle metadata fields.
    //! @param fieldTypes Types of new bundle metadata fields.
    //! @param elementCount Number of elements in the array, can be `nullptr` if field is not an array.
    //! @param fieldCount Size of fieldNames and fieldTypes arrays.
    //! @param bundleMetadata Handles to the newly created bundle metadata fields. Can be `nullptr` if no output is
    //! required.
    //! @param createdCount Number of child bundles successfully created.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result createBundleMetadata(const omni::graph::core::NameToken* const fieldNames,
                                            const omni::graph::core::Type* const fieldTypes,
                                            const size_t* const elementCount,
                                            size_t fieldCount,
                                            omni::graph::core::AttributeDataHandle* const bundleMetadata,
                                            size_t* const createdCount) noexcept;

    //! @brief Remove bundle metadata based on provided field names.
    //!
    //! @param fieldNames Names of the bundle metadata fields whose data is to be removed.
    //! @param fieldCount Number of the bundle metadata fields to be removed.
    //! @param removedCount Number of bundle metadata fields successfully removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeBundleMetadata(const omni::graph::core::NameToken* const fieldNames,
                                            size_t fieldCount,
                                            size_t* const removedCount) noexcept;

    //! @brief Search for read write field handles in the attribute by using field names.
    //!
    //! @copydetails IConstBundle2_abi::getConstAttributeMetadataByName_abi
    omni::core::Result getAttributeMetadataByName(omni::graph::core::NameToken attribute,
                                                  const omni::graph::core::NameToken* const fieldNames,
                                                  size_t fieldCount,
                                                  omni::graph::core::AttributeDataHandle* const attributeMetadata) noexcept;

    //! @brief Create attribute metadata fields.
    //!
    //! @param attribute Name of the attribute.
    //! @param fieldNames Names of new attribute metadata fields.
    //! @param fieldTypes Types of new attribute metadata fields.
    //! @param elementCount Number of elements in the array, can be `nullptr` if field is not an array.
    //! @param fieldCount Size of fieldNames and fieldTypes arrays.
    //! @param attributeMetadata Handles to the newly created attribute metadata. Can be `nullptr` if no output is
    //! required.
    //! @param createdCount Number of attribute metadata fields successfully created.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result createAttributeMetadata(omni::graph::core::NameToken attribute,
                                               const omni::graph::core::NameToken* const fieldNames,
                                               const omni::graph::core::Type* const fieldTypes,
                                               const size_t* const elementCount,
                                               size_t fieldCount,
                                               omni::graph::core::AttributeDataHandle* const attributeMetadata,
                                               size_t* const createdCount) noexcept;

    //! @brief Remove attribute metadata fields.
    //!
    //! @param attribute Name of the attribute.
    //! @param fieldNames Names of the attribute metadata fields to be removed.
    //! @param fieldCount Size of fieldNames array.
    //! @param removedCount Number of attribute metadata fields successfully removed.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result removeAttributeMetadata(omni::graph::core::NameToken attribute,
                                               const omni::graph::core::NameToken* const fieldNames,
                                               size_t fieldCount,
                                               size_t* const removedCount) noexcept;

    //! @brief Remove all attributes, child bundles and metadata from this bundle, but keep the bundle itself.
    //!
    //! @param bundleMetadata Clears bundle metadata in this bundle.
    //! @param attributes Clears attributes in this bundle.
    //! @param childBundles Clears child bundles in this bundle.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
    omni::core::Result clearContents(bool bundleMetadata, bool attributes, bool childBundles) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline trampolines: each public method forwards directly to the raw ABI method.

inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getHandle() noexcept
{
    return getHandle_abi();
}

inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getParentBundle() noexcept
{
    return getParentBundle_abi();
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributes(
    omni::graph::core::AttributeDataHandle* const attributes, size_t* const attributeCount) noexcept
{
    return getAttributes_abi(attributes, attributeCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributesByName(
    const omni::graph::core::NameToken* const names,
    size_t nameCount,
    omni::graph::core::AttributeDataHandle* const attributes) noexcept
{
    return getAttributesByName_abi(names, nameCount, attributes);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundles(
    omni::graph::core::BundleHandle* const bundles, size_t* const bundleCount) noexcept
{
    return getChildBundles_abi(bundles, bundleCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundle(
    size_t bundleIndex, omni::graph::core::BundleHandle* const bundle) noexcept
{
    return getChildBundle_abi(bundleIndex, bundle);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundlesByName(
    const omni::graph::core::NameToken* const names,
    size_t nameCount,
    omni::graph::core::BundleHandle* const foundBundles) noexcept
{
    return getChildBundlesByName_abi(names, nameCount, foundBundles);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::copyAttributes(
    const omni::graph::core::NameToken* const newNames,
    const omni::graph::core::ConstAttributeDataHandle* const sourceAttributes,
    size_t attributeCount,
    bool overwrite,
    omni::graph::core::AttributeDataHandle* const copiedAttributes,
    size_t* const copiedCount) noexcept
{
    return copyAttributes_abi(newNames, sourceAttributes, attributeCount, overwrite, copiedAttributes, copiedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributes(
    const omni::graph::core::NameToken* const names,
    const omni::graph::core::Type* const types,
    const size_t* const elementCount,
    size_t attributeCount,
    omni::graph::core::AttributeDataHandle* const createdAttributes,
    size_t* const createdCount) noexcept
{
    return createAttributes_abi(names, types, elementCount, attributeCount, createdAttributes, createdCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributesLike(
    const omni::graph::core::ConstAttributeDataHandle* const patternAttributes,
    size_t patternCount,
    omni::graph::core::AttributeDataHandle* const createdAttributes,
    size_t* const createdCount) noexcept
{
    return createAttributesLike_abi(patternAttributes, patternCount, createdAttributes, createdCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createChildBundles(
    const omni::graph::core::NameToken* const names,
    size_t nameCount,
    omni::graph::core::BundleHandle* const createdBundles,
    size_t* const createdCount) noexcept
{
    return createChildBundles_abi(names, nameCount, createdBundles, createdCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkAttributes(
    const omni::graph::core::NameToken* const linkNames,
    const omni::graph::core::ConstAttributeDataHandle* const targetAttributes,
    size_t attributeCount,
    omni::graph::core::AttributeDataHandle* const linkedAttributes,
    size_t* const linkedCount) noexcept
{
    return linkAttributes_abi(linkNames, targetAttributes, attributeCount, linkedAttributes, linkedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::copyBundle(
    const omni::graph::core::ConstBundleHandle& sourceBundle, bool overwrite) noexcept
{
    return copyBundle_abi(sourceBundle, overwrite);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::copyChildBundles(
    const omni::graph::core::NameToken* const newNames,
    const omni::graph::core::ConstBundleHandle* const sourceBundles,
    size_t bundleCount,
    bool overwrite,
    omni::graph::core::BundleHandle* const copiedBundles,
    size_t* const copiedCount) noexcept
{
    return copyChildBundles_abi(newNames, sourceBundles, bundleCount, overwrite, copiedBundles, copiedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkBundle(
    const omni::graph::core::ConstBundleHandle* const sourceBundle) noexcept
{
    return linkBundle_abi(sourceBundle);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkChildBundles(
    const omni::graph::core::NameToken* const linkNames,
    const omni::graph::core::ConstBundleHandle* const targetBundles,
    size_t bundleCount,
    omni::graph::core::BundleHandle* const linkedBundles,
    size_t* const linkedCount) noexcept
{
    return linkChildBundles_abi(linkNames, targetBundles, bundleCount, linkedBundles, linkedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributes(
    const omni::graph::core::ConstAttributeDataHandle* const attributes,
    size_t attributeCount,
    size_t* const removedCount) noexcept
{
    return removeAttributes_abi(attributes, attributeCount, removedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributesByName(
    const omni::graph::core::NameToken* const names, size_t nameCount, size_t* const removedCount) noexcept
{
    return removeAttributesByName_abi(names, nameCount, removedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeChildBundles(
    const omni::graph::core::ConstBundleHandle* const childHandles,
    size_t childCount,
    size_t* const removedCount) noexcept
{
    return removeChildBundles_abi(childHandles, childCount, removedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeChildBundlesByName(
    const omni::graph::core::NameToken* const names, size_t nameCount, size_t* const removedCount) noexcept
{
    return removeChildBundlesByName_abi(names, nameCount, removedCount);
}

inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getMetadataStorage() noexcept
{
    return getMetadataStorage_abi();
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getBundleMetadataByName(
    const omni::graph::core::NameToken* const fieldNames,
    size_t fieldCount,
    omni::graph::core::AttributeDataHandle* const bundleMetadata) noexcept
{
    return getBundleMetadataByName_abi(fieldNames, fieldCount, bundleMetadata);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createBundleMetadata(
    const omni::graph::core::NameToken* const fieldNames,
    const omni::graph::core::Type* const fieldTypes,
    const size_t* const elementCount,
    size_t fieldCount,
    omni::graph::core::AttributeDataHandle* const bundleMetadata,
    size_t* const createdCount) noexcept
{
    return createBundleMetadata_abi(fieldNames, fieldTypes, elementCount, fieldCount, bundleMetadata, createdCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeBundleMetadata(
    const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, size_t* const removedCount) noexcept
{
    return removeBundleMetadata_abi(fieldNames, fieldCount, removedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributeMetadataByName(
    omni::graph::core::NameToken attribute,
    const omni::graph::core::NameToken* const fieldNames,
    size_t fieldCount,
    omni::graph::core::AttributeDataHandle* const attributeMetadata) noexcept
{
    return getAttributeMetadataByName_abi(attribute, fieldNames, fieldCount, attributeMetadata);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributeMetadata(
    omni::graph::core::NameToken attribute,
    const omni::graph::core::NameToken* const fieldNames,
    const omni::graph::core::Type* const fieldTypes,
    const size_t* const elementCount,
    size_t fieldCount,
    omni::graph::core::AttributeDataHandle* const attributeMetadata,
    size_t* const createdCount) noexcept
{
    return createAttributeMetadata_abi(
        attribute, fieldNames, fieldTypes, elementCount, fieldCount, attributeMetadata, createdCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributeMetadata(
    omni::graph::core::NameToken attribute,
    const omni::graph::core::NameToken* const fieldNames,
    size_t fieldCount,
    size_t* const removedCount) noexcept
{
    return removeAttributeMetadata_abi(attribute, fieldNames, fieldCount, removedCount);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::clearContents(
    bool bundleMetadata, bool attributes, bool childBundles) noexcept
{
    return clearContents_abi(bundleMetadata, attributes, childBundles);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/core/IObject.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Defines the base class for ABI-safe interfaces. #pragma once #include "Assert.h" #include "Result.h" #include "TypeId.h" #include "OmniAttr.h" #include <atomic> #include <climits> // CHAR_BITS #include <type_traits> //! Main namespace for Omniverse. namespace omni { //! Core functionality for Omniverse Interfaces. namespace core { // we assume 8-bit chars static_assert(CHAR_BIT == 8, "non-octet char is not supported"); class IObject_abi; class IObject; //! Base class for all @rstref{ABI-safe <abi-compatibility>} interfaces. Provides references counting and an ABI-safe //! `dynamic_cast` like mechanism. //! //! When defining a new interface, use the @ref Inherits template. //! //! When implementing one or more interfaces use the @ref omni::core::Implements template. //! //! See @oni_overview to understand the overall design of Omniverse Native Interfaces. //! //! @thread_safety All methods in this interface are thread safe. class OMNI_ATTR("no_py") IObject_abi { public: #ifndef DOXYGEN_SHOULD_SKIP_THIS //! Anonymous enum to store the type ID. enum : TypeId { kTypeId = OMNI_TYPE_ID("omni.core.IObject") //!< Uniquely identifies the @ref IObject_abi interface. }; #endif protected: //! Returns a pointer to the interface defined by the given type id if this object implements the type id's //! interface. //! //! Objects can support multiple interfaces, even interfaces that are in different inheritance chains. //! //! 
The returned object will have @ref omni::core::IObject::acquire() called on it before it is returned, meaning it //! is up to the caller to call @ref omni::core::IObject::release() on the returned pointer. //! //! The returned pointer can be safely `reinterpret_cast<>` to the type id's C++ class. For example, //! "omni.windowing.IWindow" can be cast to `omni::windowing::IWindow`. //! //! Do not directly use this method, rather use a wrapper function like @ref omni::core::cast() or @ref //! omni::core::ObjectPtr::as(). //! //! @thread_safety This method is thread safe. virtual void* cast_abi(TypeId id) noexcept = 0; //! Increments the object's reference count. //! //! Objects may have multiple reference counts (e.g. one per interface implemented). As such, it is important that //! you call @ref omni::core::IObject::release() on the same pointer from which you called @ref //! omni::core::IObject::acquire(). //! //! Do not directly use this method, rather use @ref omni::core::ObjectPtr, which will manage calling @ref //! omni::core::IObject::acquire() and @ref omni::core::IObject::release() for you. //! //! @thread_safety This method is thread safe. virtual void acquire_abi() noexcept = 0; //! Decrements the objects reference count. //! //! Most implementations will destroy the object if the reference count reaches 0 (though this is not a //! requirement). //! //! Objects may have multiple reference counts (e.g. one per interface implemented). As such, it is important that //! you call @ref omni::core::IObject::release() on the same pointer from which you called @ref //! omni::core::IObject::acquire(). //! //! Do not directly use this method, rather use @ref omni::core::ObjectPtr, which will manage calling @ref //! omni::core::IObject::acquire() and @ref omni::core::IObject::release() for you. //! //! @thread_safety This method is thread safe. virtual void release_abi() noexcept = 0; }; } // namespace core } // namespace omni //! 
By defining this macro before including a header generated by *omni.bind*, only the declaration of any generated //! boiler-plate code is included. //! //! @see OMNI_BIND_INCLUDE_INTERFACE_IMPL #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include "IObject.gen.h" namespace omni { namespace core { //! @copydoc omni::core::IObject_abi class IObject : public omni::core::Generated<omni::core::IObject_abi> { }; //! Helper template for interface inheritance. //! //! Using this template defines compile time information used by @ref omni::core::Implements. //! //! Expected usage: //! //! @code{.cpp} //! //! class IMyInterface : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("IMyInterface") //! { /* ... */ }; //! //! @endcode template <typename BASE, TypeId TYPEID> class Inherits : public BASE { public: #ifndef DOXYGEN_BUILD //! Anonymous enum to store the type ID. enum : TypeId { kTypeId = TYPEID //!< The unique interface type of this object. }; using BaseType = BASE; //!< Useful for @ref omni::core::Implements. #endif }; #ifndef DOXYGEN_BUILD namespace detail { //! Helper type used by @ref ObjectPtr. class BorrowPtrType { public: explicit BorrowPtrType() noexcept = default; }; } // namespace detail #endif //! Used to create an @ref ObjectPtr that increments an objects reference count. //! //! @code{.cpp} //! //! IMyType* raw = /* ... */; //! auto smart = ObjectPtr<IMyType>(myType, kBorrow); //! //! @endcode //! //! @ref ObjectPtr's rarely "borrow" raw pointers, rather they usually "steal" them (see @ref kSteal). //! //! See @ref omni::core::borrow(). constexpr detail::BorrowPtrType kBorrow{}; #ifndef DOXYGEN_BUILD namespace detail { //! Helper type used by ObjectPtr. class StealPtrType { public: explicit StealPtrType() noexcept = default; }; } // namespace detail #endif //! Used to create an @ref ObjectPtr that does not increments an objects reference count. The @ref ObjectPtr does //! 
decrement the reference count of the raw pointer upon the @ref ObjectPtr's destruction. //! //! @code //! //! auto smart = ObjectPtr<IMyType>(createMyType, kSteal); //! //! @endcode //! //! Stealing a raw pointer is quite common when a function returns a raw interface pointer. //! //! See @ref omni::core::steal(). constexpr detail::StealPtrType kSteal{}; //! Smart pointer wrapper around interface pointers. //! //! This object manages the mundane detail of managing the given objects reference count. //! //! There is no implicit raw pointer to @ref ObjectPtr conversion. Such a conversion is ambiguous, as it is unclear if //! the object's reference count should be immediately incremented. Rather, use @ref omni::core::steal() and @ref //! omni::core::borrow() to create an @ref ObjectPtr. //! //! Use @ref get() to return the raw object pointer. The pointer will still be managed by this wrapper. //! //! Use @ref detach() to return and stop managing the raw pointer. When calling @ref detach(), @ref //! omni::core::IObject::release() will not be called. //! //! Use @ref release() to decrement the raw pointer's reference count and stop managing the raw pointer. //! //! @rst //! //! .. warning:: //! ``ObjectPtr::release()`` does not have the same meaning as ``std::unique_ptr::release()``. //! ``std::unique_ptr::release()`` is equivalent to ``ObjectPtr::detach()``. //! //! @endrst //! //! Use @ref as() to cast the pointer to another interface. //! //! Unless otherwise stated, the managed pointer can be `nullptr`. //! //! @thread_safety All methods are thread safe. template <typename T> class ObjectPtr { public: //! Allow implicit conversion from `nullptr` to an @ref ObjectPtr. constexpr ObjectPtr(std::nullptr_t = nullptr) noexcept { } //! Start managing the given raw pointer. @ref omni::core::IObject::acquire() will be called on the pointer. //! //! Prefer using @ref omni::core::borrow() over this constructor. 
ObjectPtr(T* other, detail::BorrowPtrType) noexcept : m_ptr(other) { addRef(); } //! Start managing the given raw pointer. @ref omni::core::IObject::acquire() will *not* be called on the pointer. //! //! Prefer using @ref omni::core::steal() over this constructor. constexpr ObjectPtr(T* other, detail::StealPtrType) noexcept : m_ptr(other) { } //! Copy constructor. ObjectPtr(const ObjectPtr& other) noexcept : m_ptr(other.m_ptr) { addRef(); } //! Copy constructor. template <typename U> ObjectPtr(const ObjectPtr<U>& other) noexcept : m_ptr(other.m_ptr) { addRef(); } //! Move constructor. template <typename U> ObjectPtr(ObjectPtr<U>&& other) noexcept : m_ptr(std::exchange(other.m_ptr, {})) { } //! Destructor. Calls @ref release() on the managed pointer. ~ObjectPtr() noexcept { releaseRef(); } //! Assignment operator. ObjectPtr& operator=(const ObjectPtr& other) noexcept { copyRef(other.m_ptr); return *this; } //! Move operator. ObjectPtr& operator=(ObjectPtr&& other) noexcept { if (this != &other) { releaseRef(); m_ptr = std::exchange(other.m_ptr, {}); } return *this; } //! Assignment operator. template <typename U> ObjectPtr& operator=(const ObjectPtr<U>& other) noexcept { copyRef(other.m_ptr); return *this; } //! Move operator. template <typename U> ObjectPtr& operator=(ObjectPtr<U>&& other) noexcept { releaseRef(); m_ptr = std::exchange(other.m_ptr, {}); return *this; } //! Returns true if the managed pointer is not `nullptr`. explicit operator bool() const noexcept { return m_ptr != nullptr; } //! The managed pointer must not be `nullptr`. T* operator->() const noexcept { return m_ptr; } //! The managed pointer must not be `nullptr`. T& operator*() const noexcept { return *m_ptr; } //! Returns the raw pointer. The pointer is still managed by this wrapper. //! //! This method is useful when having to pass raw pointers to ABI functions. T* get() const noexcept { return m_ptr; } //! Returns a pointer to the managed pointer (which must be `nullptr`). //! //! 
The managed pointer must be `nullptr`, otherwise the function results in undefined behavior. //! //! Useful when having to manage pointers output via a function argument list. //! //! @code{.cpp} //! //! void createMyType(MyType** out); //! //! // ... //! //! ObjectPtr<MyType> ptr; //! createMyType(ptr.put()); //! //! @endcode //! //! Such methods are rare. T** put() noexcept { OMNI_ASSERT(m_ptr == nullptr); return &m_ptr; } //! Manage the given pointer. @ref omni::core::IObject::acquire() is not called on the pointer. //! //! See @ref borrow() for a method that does call @ref omni::core::IObject::acquire(). void steal(T* value) noexcept { releaseRef(); *put() = value; } //! Returns the managed pointer and no longer manages the pointer. //! //! @ref omni::core::IObject::release() is *not* called on the pointer. Use this method to stop managing the //! pointer. T* detach() noexcept { return std::exchange(m_ptr, {}); } //! Manage the given pointer. @ref omni::core::IObject::acquire() is called on the pointer. //! //! See @ref steal() for a method that does not call @ref omni::core::IObject::acquire(). void borrow(T* value) noexcept { releaseRef(); *put() = value; addRef(); } //! Cast the managed pointer to a new interface type (@p To). //! //! `nullptr` is returned if the pointer does not implement the interface. template <typename To> ObjectPtr<To> as() const noexcept { if (!m_ptr) { return nullptr; // dynamic_cast allows a nullptr, so we do as well } else { return ObjectPtr<To>(reinterpret_cast<To*>(m_ptr->cast(To::kTypeId)), kSteal); } } //! Cast the managed pointer to the type of the given @ref omni::core::ObjectPtr (e.g. @p To). //! //! `nullptr` is written to @p to if the pointer does not implement the interface. template <typename To> void as(ObjectPtr<To>& to) const noexcept { if (!m_ptr) { to.steal(nullptr); // dynamic_cast allows a nullptr, so we do as well } else { to.steal(reinterpret_cast<To*>(m_ptr->cast(To::kTypeId))); } } //! 
Calls @ref release() on the managed pointer and sets the internal pointer to `nullptr`. //! //! @rst //! //! .. warning:: //! ``ObjectPtr::release()`` does not have the same meaning as ``std::unique_ptr::release()``. //! ``std::unique_ptr::release()`` is equivalent to ``ObjectPtr::detach()``. //! //! @endrst void release() noexcept { releaseRef(); } //! Calls @ref release() on the managed pointer and sets the internal pointer to @p value //! //! Equivalent to `std::unique_ptr::reset()`. //! //! @param value The new value to assign to `*this`, defaults to `nullptr`. void reset(T* value = nullptr) noexcept { if (value) { const_cast<std::remove_const_t<T>*>(value)->acquire(); } T* oldval = std::exchange(m_ptr, value); if (oldval) { oldval->release(); } } private: void copyRef(T* other) noexcept { if (m_ptr != other) { releaseRef(); m_ptr = other; addRef(); } } void addRef() const noexcept { if (m_ptr) { const_cast<std::remove_const_t<T>*>(m_ptr)->acquire(); } } void releaseRef() noexcept { if (m_ptr) { std::exchange(m_ptr, {})->release(); } } template <typename U> friend class ObjectPtr; T* m_ptr{}; }; // Breathe/Sphinx is unable to handle these overloads and produces warnings. Since we don't like warnings, remove these // overloads from the docs until Breathe/Sphinx is updated. #ifndef DOXYGEN_SHOULD_SKIP_THIS //! @ref ObjectPtr less than operator. template <typename T> inline bool operator<(const ObjectPtr<T>& left, const ObjectPtr<T>& right) noexcept { return (left.get() < right.get()); } //! @ref ObjectPtr equality operator. template <typename T> inline bool operator==(const ObjectPtr<T>& left, const ObjectPtr<T>& right) noexcept { return (left.get() == right.get()); } //! @ref ObjectPtr equality operator (with raw pointer). template <typename T> inline bool operator==(const ObjectPtr<T>& left, const T* right) noexcept { return (left.get() == right); } //! @ref ObjectPtr equality operator (with raw pointer). 
template <typename T> inline bool operator==(const T* left, const ObjectPtr<T>& right) noexcept { return (left == right.get()); } //! @ref ObjectPtr equality operator (with nullptr). template <typename T> inline bool operator==(const ObjectPtr<T>& left, std::nullptr_t) noexcept { return (left.get() == nullptr); } //! @ref ObjectPtr equality operator (with nullptr). template <typename T> inline bool operator==(std::nullptr_t, const ObjectPtr<T>& right) noexcept { return (right.get() == nullptr); } //! @ref ObjectPtr inequality operator. template <typename T> inline bool operator!=(const ObjectPtr<T>& left, const ObjectPtr<T>& right) noexcept { return (left.get() != right.get()); } //! @ref ObjectPtr inequality operator (with raw pointer). template <typename T> inline bool operator!=(const ObjectPtr<T>& left, const T* right) noexcept { return (left.get() != right); } //! @ref ObjectPtr inequality operator (with raw pointer). template <typename T> inline bool operator!=(const T* left, const ObjectPtr<T>& right) noexcept { return (left != right.get()); } //! @ref ObjectPtr inequality operator (with nullptr). template <typename T> inline bool operator!=(const ObjectPtr<T>& left, std::nullptr_t) noexcept { return (left.get() != nullptr); } //! @ref ObjectPtr inequality operator (with nullptr). template <typename T> inline bool operator!=(std::nullptr_t, const ObjectPtr<T>& right) noexcept { return (right.get() != nullptr); } #endif // DOXYGEN_SHOULD_SKIP_THIS //! Returns an @ref ObjectPtr managing the given pointer. @ref omni::core::IObject::acquire() is **not** called on the //! pointer. //! //! `nullptr` is accepted. template <typename T> inline ObjectPtr<T> steal(T* ptr) noexcept { return ObjectPtr<T>(ptr, kSteal); } //! Returns an @ref ObjectPtr managing the given pointer. @ref omni::core::IObject::acquire() is called on the //! pointer. //! //! `nullptr` is accepted. 
template <typename T> inline ObjectPtr<T> borrow(T* ptr) noexcept { return ObjectPtr<T>(ptr, kBorrow); } //! Casts the given pointer to the given interface (e.g. T). //! //! `nullptr` is accepted. //! //! @returns A valid pointer is returned if the given pointer implements the given interface. Otherwise, `nullptr` is //! returned. template <typename T, typename U> inline ObjectPtr<T> cast(U* ptr) noexcept { static_assert(std::is_base_of<IObject, T>::value, "cast can only be used with classes that derive from IObject"); if (ptr) { return ObjectPtr<T>{ reinterpret_cast<T*>(ptr->cast(T::kTypeId)), kSteal }; } else { return { nullptr }; } } #ifndef DOXYGEN_BUILD namespace detail { template <typename T> inline void* cast(T* obj, TypeId id) noexcept; // forward declaration } // namespace detail #endif //! Helper template for implementing the cast function for one or more interfaces. //! //! Implementations of interfaces (usually hidden in <i>.cpp</i> files) are well served to use this template. //! //! This template provides the following useful feature: //! //! - It provides a @ref omni::core::IObject_abi::cast_abi() implementation that supports multiple inheritance. //! //! Using @ref omni::core::ImplementsCast is recommended in cases where you want the default implementation //! of the cast function, but want to override the behavior of @ref omni::core::IObject_abi::acquire_abi() and @ref //! omni::core::IObject_abi::release_abi(). If the default implementation of cast, acquire, and release functions is //! desired, then using @ref Implements is recommended. //! //! A possible usage implementing your own acquire/release semantics: //! //! @code{.cpp} //! //! class MyIFooAndIBarImpl : public omni::core::ImplementsCast<IFoo, IBar> //! { //! public: //! //! See @ref omni::core::IObject::acquire. //! inline void acquire() noexcept //! { //! // note: this implementation is needed to disambiguate which `cast` to call when using multiple //! // inheritance. 
it has zero-overhead. //! static_cast<IFoo*>(this)->acquire(); //! } //! //! //! See @ref omni::core::IObject::release. //! inline void release() noexcept //! { //! // note: this implementation is needed to disambiguate which `cast` to call when using multiple //! // inheritance. it has zero-overhead. //! static_cast<IFoo*>(this)->release(); //! } //! protected: //! std::atomic<uint32_t> m_refCount{ 1 }; //!< Reference count. //! //! //! @copydoc IObject_abi::acquire_abi //! virtual void acquire_abi() noexcept override //! { //! uint32_t count = m_refCount.fetch_add(1, std::memory_order_relaxed) + 1; //! CARB_LOG_INFO("Increased count to %u", count); //! } //! //! //! @copydoc IObject_abi::release_abi //! virtual void release_abi() noexcept override //! { //! uint32_t count = m_refCount.fetch_sub(1, std::memory_order_release) - 1; //! CARB_LOG_INFO("Reduced count to %u", count); //! if (0 == count) //! { //! std::atomic_thread_fence(std::memory_order_acquire); //! delete this; //! } //! } //! //! /* ... */ //! }; //! //! @endcode template <typename T, typename... Rest> struct ImplementsCast : public T, public Rest... { public: //! See @ref omni::core::IObject::cast. inline void* cast(omni::core::TypeId id) noexcept { // note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it // has zero-overhead. return static_cast<T*>(this)->cast(id); } private: // given a type id, castImpl() check if the type id matches T's typeid. if not, T's parent class type id is // checked. if T's parent class type id does not match, the grandparent class's type id is check. this continues // until IObject's type id is checked. // // if no type id in T's inheritance chain match, the next interface in Rest is checked. // // it's expected the compiler can optimize away the recursion template <typename U, typename... 
Args> inline void* castImpl(TypeId id) noexcept { // detail::cast will march down the inheritance chain void* obj = omni::core::detail::cast<U>(this, id); if (nullptr == obj) { // check the next class (inheritance chain) provide in the inheritance list return castImpl<Args...>(id); } return obj; } // this terminates walking across the types in the variadic template template <int = 0> inline void* castImpl(TypeId) noexcept { return nullptr; } protected: virtual ~ImplementsCast() noexcept = default; //! @copydoc omni::core::IObject_abi::cast_abi virtual void* cast_abi(TypeId id) noexcept override { return castImpl<T, Rest...>(id); } }; //! Helper template for implementing one or more interfaces. //! //! Implementations of interfaces (usually hidden in <i>.cpp</i> files) are well served to use this template. //! //! This template provides two useful features: //! //! - It provides a reference count and reasonable implementations of @ref omni::core::IObject_abi::acquire_abi() and //! @ref omni::core::IObject_abi::release_abi(). //! //! - It provides a @ref omni::core::IObject_abi::cast_abi() implementation that supports multiple inheritance. //! //! Using @ref omni::core::Implements is recommended in most cases when implementing one or more interfaces. //! //! Expected usage: //! //! @code{.cpp} //! //! class MyIFooAndIBarImpl : public omni::core::Implements<IFoo, IBar> //! { /* ... */ }; //! //! @endcode template <typename T, typename... Rest> struct Implements : public ImplementsCast<T, Rest...> { public: //! See @ref omni::core::IObject::acquire. inline void acquire() noexcept { // note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it // has zero-overhead. static_cast<T*>(this)->acquire(); } //! See @ref omni::core::IObject::release. inline void release() noexcept { // note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it // has zero-overhead. 
static_cast<T*>(this)->release(); } protected: std::atomic<uint32_t> m_refCount{ 1 }; //!< Reference count. virtual ~Implements() noexcept = default; //! @copydoc omni::core::IObject_abi::acquire_abi virtual void acquire_abi() noexcept override { m_refCount.fetch_add(1, std::memory_order_relaxed); } //! @copydoc omni::core::IObject_abi::release_abi virtual void release_abi() noexcept override { if (0 == m_refCount.fetch_sub(1, std::memory_order_release) - 1) { std::atomic_thread_fence(std::memory_order_acquire); delete this; } } }; #ifndef DOXYGEN_BUILD namespace detail { //! Given a type, this function walks the inheritance chain for the type, checking if the id of the type matches the //! given id. //! //! Implementation detail. Do not call. template <typename T> inline void* cast(T* obj, TypeId id) noexcept { if (T::kTypeId == id) { obj->acquire(); // match! since we return an interface pointer, acquire() must be called. return obj; } else { return cast<typename T::BaseType>(obj, id); // call cast again, but with the parent type } } //! Specialization of `cast<T>(T*, TypeId)` for @ref omni::core::IObject. @ref omni::core::IObject always terminates the //! recursive template since it does not have a base class. //! //! Implementation detail. Do not call. template <> inline void* cast<IObject>(IObject* obj, TypeId id) noexcept { if (IObject::kTypeId == id) { obj->acquire(); return obj; } else { return nullptr; } } } // namespace detail #endif } // namespace core } // namespace omni //! By defining this macro before including a header generated by *omni.bind*, only the implementations of any //! generated boiler-plate code is included. //! //! @see OMNI_BIND_INCLUDE_INTERFACE_DECL #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include "IObject.gen.h"
omniverse-code/kit/include/omni/core/TypeId.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! @brief Helper functions and macros for generating type identifiers. #pragma once #include "../../carb/Defines.h" #include <cstdint> namespace omni { namespace core { //! Base type for an interface type identifier. using TypeId = uint64_t; //! Returns the type id of the given type name at compile time. //! //! See omni::core::typeId() for a version of this macro that is evaluated at runtime. #define OMNI_TYPE_ID(str_) CARB_HASH_STRING(str_) //! Returns the type id of the given type name at run time. //! //! If possible, use OMNI_TYPE_ID(), which is evaluated at compile time. inline TypeId typeId(const char* str) { // currently, the user must choose between calling typeId() or OMNI_TYPE_ID() to map a string to an id. the user's // decision should be based on if the input string is known at compile time or not. // // we should be able to have a single constexpr function, typeId(), that will determine if the input string is // constant or not. the compiler would then be able to evaluate the string correctly at compile time or runtime. // this would make the user's life easier. // // unfortunately, MSVC incorrectly warns when it detects that an unsigned expression will overflow in a constexpr. // overflowing an unsigned value is well-defined and actually by design in our hashing algorithm. // // disabling this warning is even more of a pain, as MSVC doesn't allow you to disable the warning at the // overflowing expression. rather, you must disable the warning at each call site of the constexpr. 
// // this bug is fixed in late 2019. since customers may be on older compilers, we're stuck with this workaround. // // https://developercommunity.visualstudio.com/content/problem/211134/unsigned-integer-overflows-in-constexpr-functionsa.html // https://stackoverflow.com/questions/57342279/how-can-i-suppress-constexpr-warnings-in-msvc return carb::hashString(str); } } // namespace core } // namespace omni
omniverse-code/kit/include/omni/core/Api.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! @brief Helper macros to provide API calling convention tags. #pragma once #include "Platform.h" #ifdef __cplusplus //! Declares a "C" exported external symbol. This uses the "C" name decoration style of //! adding an underscore to the start of the exported name. # define OMNI_EXTERN_C extern "C" #else //! Declares a "C" exported external symbol. This uses the "C" name decoration style of //! adding an underscore to the start of the exported name. # define OMNI_EXTERN_C #endif //! \copydoc CARB_EXPORT #define OMNI_EXPORT CARB_EXPORT // Functions that wish to be exported from a .dll/.so should be decorated with OMNI_API. // // Functions related to modules, such omniGetModuleExports(), should be decorated with OMNI_MODULE_API. #ifdef OMNI_COMPILE_AS_DYNAMIC_LIBRARY # if OMNI_PLATFORM_WINDOWS //! Declares a symbol that is marked as externally exported. The symbol will be exported //! with C decorations. On Windows, this is expected to be exported from the containing DLL. //! On Linux, this is exported as having default visibility from the module instead of being //! hidden. This export tag should only be used when tagging exported symbols from within //! omni.core itself. Exported symbols in other modules (such as `omniGetModuleExports()` //! functions in implementation libraries) should use @ref OMNI_MODULE_API instead. # define OMNI_API OMNI_EXTERN_C __declspec(dllexport) # elif OMNI_PLATFORM_LINUX || OMNI_PLATFORM_MACOS //! Declares a symbol that is marked as externally exported. The symbol will be exported //! 
with C decorations. On Windows, this is expected to be exported from the containing DLL. //! On Linux, this is exported as having default visibility from the module instead of being //! hidden. This export tag should only be used when tagging exported symbols from within //! omni.core itself. Exported symbols in other modules (such as `omniGetModuleExports()` //! functions in implementation libraries) should use @ref OMNI_MODULE_API instead. # define OMNI_API OMNI_EXTERN_C __attribute__((visibility("default"))) # endif #else //! Declares a symbol that is marked as externally exported. The symbol will be exported //! with C decorations. On Windows, this is expected to be exported from the containing DLL. //! On Linux, this is exported as having default visibility from the module instead of being //! hidden. This export tag should only be used when tagging exported symbols from within //! omni.core itself. Exported symbols in other modules (such as `omniGetModuleExports()` //! functions in implementation libraries) should use @ref OMNI_MODULE_API instead. # define OMNI_API OMNI_EXTERN_C #endif // Functions related to modules should be decorated with OMNI_MODULE_API. Currently, only omniGetModuleExports() // qualifies. #ifdef OMNI_COMPILE_AS_MODULE # ifdef OMNI_COMPILE_AS_DYNAMIC_LIBRARY # error "OMNI_COMPILE_AS_DYNAMIC_LIBRARY and OMNI_COMPILE_AS_MODULE cannot be both defined" # endif # if OMNI_PLATFORM_WINDOWS //! Declares a function that is marked as externally exported. The symbol will be exported //! with C decorations. On Windows, this is expected to be exported from the containing DLL. //! On Linux, this is exported as having default visibility from the module instead of being //! hidden. This is intended for exported symbols in implementation libraries. # define OMNI_MODULE_API OMNI_EXTERN_C __declspec(dllexport) # elif OMNI_PLATFORM_LINUX || OMNI_PLATFORM_MACOS //! Declares a function that is marked as externally exported. The symbol will be exported //! 
with C decorations. On Windows, this is expected to be exported from the containing DLL. //! On Linux, this is exported as having default visibility from the module instead of being //! hidden. This is intended for exported symbols in implementation libraries. # define OMNI_MODULE_API OMNI_EXTERN_C __attribute__((visibility("default"))) # else CARB_UNSUPPORTED_PLATFORM(); # endif #else //! Declares a function that is marked as externally exported. The symbol will be exported //! with C decorations. On Windows, this is expected to be exported from the containing DLL. //! On Linux, this is exported as having default visibility from the module instead of being //! hidden. This is intended for exported symbols in implementation libraries. # define OMNI_MODULE_API OMNI_EXTERN_C #endif
omniverse-code/kit/include/omni/core/Types.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//! @brief Common data structs and types.
#pragma once

#include "OmniAttr.h"

#include "../../carb/Types.h"

#ifndef DOXYGEN_SHOULD_SKIP_THIS
CARB_IGNOREWARNING_MSC_WITH_PUSH(4201) // nonstandard extension used: nameless struct/union
#endif

namespace omni
{
namespace core
{

/** Helper struct to represent a single 2-space vector of unsigned integers.  Each member
 *  of the struct can be accessed in multiple ways including an array and direct accessors
 *  known by multiple names.  Objects of this struct are guaranteed to be only as large as
 *  two 32-bit unsigned integers.
 */
union OMNI_ATTR("vec") UInt2
{
    /** Access to the value members in this object as an array. */
    OMNI_ATTR("no_py") uint32_t data[2]; // must be first for proper { } initialization

    /** Structure of unions containing the possible names of the first and second values
     *  in this object.
     */
    struct
    {
        /** Names for the first data member in this object.  This can be used to access
         *  the value treating it as a Cartesian coordinate (`x`), texture coordinate
         *  (`u`, `s`), or a dimensional size (`w`).  These are all just different names
         *  for the same value that can be used to help with the semantics of an access.
         */
        union
        {
            /** Provides access to the first data member as a Cartesian X coordinate. */
            OMNI_ATTR("init_arg") uint32_t x;
            uint32_t u; ///< Provides access to the first data member as a U texture coordinate.
            uint32_t s; ///< Provides access to the first data member as an S texture coordinate.
            uint32_t w; ///< Provides access to the first data member as a width value.
        };

        /** Names for the second data member in this object.  This can be used to access
         *  the value treating it as a Cartesian coordinate (`y`), texture coordinate
         *  (`v`, `t`), or a dimensional size (`h`).  These are all just different names
         *  for the same value that can be used to help with the semantics of an access.
         */
        union
        {
            /** Provides access to the second data member as a Cartesian Y coordinate. */
            OMNI_ATTR("init_arg") uint32_t y;
            uint32_t v; ///< Provides access to the second data member as a V texture coordinate.
            uint32_t t; ///< Provides access to the second data member as a T texture coordinate.
            uint32_t h; ///< Provides access to the second data member as a height value.
        };
    };
};
static_assert(sizeof(UInt2) == (sizeof(uint32_t) * 2), "unexpected UInt2 size");

/** Helper struct to represent a single 2-space vector of signed integers.  Each member of
 *  the struct can be accessed in multiple ways including an array and direct accessors known
 *  by multiple names.  Objects of this struct are guaranteed to be only as large as two
 *  32-bit signed integers.
 */
union OMNI_ATTR("vec") Int2
{
    /** Access to the value members in this object as an array. */
    OMNI_ATTR("no_py") int32_t data[2]; // must be first for proper { } initialization

    /** Structure of unions containing the possible names of the first and second values
     *  in this object.
     */
    struct
    {
        /** Names for the first data member in this object.  This can be used to access
         *  the value treating it as a Cartesian coordinate (`x`), texture coordinate
         *  (`u`, `s`), or a dimensional size (`w`).  These are all just different names
         *  for the same value that can be used to help with the semantics of an access.
         */
        union
        {
            /** Provides access to the first data member as a Cartesian X coordinate. */
            OMNI_ATTR("init_arg") int32_t x;
            int32_t u; ///< Provides access to the first data member as a U texture coordinate.
            int32_t s; ///< Provides access to the first data member as an S texture coordinate.
            int32_t w; ///< Provides access to the first data member as a width value.
        };

        /** Names for the second data member in this object.  This can be used to access
         *  the value treating it as a Cartesian coordinate (`y`), texture coordinate
         *  (`v`, `t`), or a dimensional size (`h`).  These are all just different names
         *  for the same value that can be used to help with the semantics of an access.
         */
        union
        {
            /** Provides access to the second data member as a Cartesian Y coordinate. */
            OMNI_ATTR("init_arg") int32_t y;
            int32_t v; ///< Provides access to the second data member as a V texture coordinate.
            int32_t t; ///< Provides access to the second data member as a T texture coordinate.
            int32_t h; ///< Provides access to the second data member as a height value.
        };
    };
};
static_assert(sizeof(Int2) == (sizeof(int32_t) * 2), "unexpected Int2 size");

/** Helper struct to represent a single 2-space vector of floating point values.  Each member of
 *  the struct can be accessed in multiple ways including an array and direct accessors known
 *  by multiple names.  Objects of this struct are guaranteed to be only as large as two
 *  32-bit floating point values.
 */
union OMNI_ATTR("vec") Float2
{
    /** Access to the value members in this object as an array. */
    OMNI_ATTR("no_py") float data[2]; // must be first for proper { } initialization

    /** Structure of unions containing the possible names of the first and second values
     *  in this object.
     */
    struct
    {
        /** Names for the first data member in this object.  This can be used to access
         *  the value treating it as a Cartesian coordinate (`x`), texture coordinate
         *  (`u`, `s`), or a dimensional size (`w`).  These are all just different names
         *  for the same value that can be used to help with the semantics of an access.
         */
        union
        {
            /** Provides access to the first data member as a Cartesian X coordinate. */
            OMNI_ATTR("init_arg") float x;
            float u; ///< Provides access to the first data member as a U texture coordinate.
            float s; ///< Provides access to the first data member as an S texture coordinate.
            float w; ///< Provides access to the first data member as a width value.
        };

        /** Names for the second data member in this object.  This can be used to access
         *  the value treating it as a Cartesian coordinate (`y`), texture coordinate
         *  (`v`, `t`), or a dimensional size (`h`).  These are all just different names
         *  for the same value that can be used to help with the semantics of an access.
         */
        union
        {
            /** Provides access to the second data member as a Cartesian Y coordinate. */
            OMNI_ATTR("init_arg") float y;
            float v; ///< Provides access to the second data member as a V texture coordinate.
            float t; ///< Provides access to the second data member as a T texture coordinate.
            float h; ///< Provides access to the second data member as a height value.
        };
    };
};
static_assert(sizeof(Float2) == (sizeof(float) * 2), "unexpected Float2 size");

} // namespace core
} // namespace omni

#ifndef DOXYGEN_SHOULD_SKIP_THIS
CARB_IGNOREWARNING_MSC_POP
#endif

#include "Types.gen.h"
omniverse-code/kit/include/omni/core/IObject.gen.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/OmniAttr.h>
#include <omni/core/Interface.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <utility>
#include <type_traits>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Base class for all @rstref{ABI-safe <abi-compatibility>} interfaces.  Provides references counting and an ABI-safe
//! `dynamic_cast` like mechanism.
//!
//! When defining a new interface, use the @ref Inherits template.
//!
//! When implementing one or more interfaces use the @ref omni::core::Implements template.
//!
//! See @oni_overview to understand the overall design of Omniverse Native Interfaces.
//!
//! @thread_safety All methods in this interface are thread safe.
template <>
class omni::core::Generated<omni::core::IObject_abi> : public omni::core::IObject_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::core::IObject")

    //! Returns a pointer to the interface defined by the given type id if this object implements the type id's
    //! interface.
    //!
    //! Objects can support multiple interfaces, even interfaces that are in different inheritance chains.
    //!
    //! The returned object will have @ref omni::core::IObject::acquire() called on it before it is returned, meaning it
    //! is up to the caller to call @ref omni::core::IObject::release() on the returned pointer.
    //!
    //! The returned pointer can be safely `reinterpret_cast<>` to the type id's C++ class.  For example,
    //! "omni.windowing.IWindow" can be cast to `omni::windowing::IWindow`.
    //!
    //! Do not directly use this method, rather use a wrapper function like @ref omni::core::cast() or @ref
    //! omni::core::ObjectPtr::as().
    //!
    //! @thread_safety This method is thread safe.
    void* cast(omni::core::TypeId id) noexcept;

    //! Increments the object's reference count.
    //!
    //! Objects may have multiple reference counts (e.g. one per interface implemented).  As such, it is important that
    //! you call @ref omni::core::IObject::release() on the same pointer from which you called @ref
    //! omni::core::IObject::acquire().
    //!
    //! Do not directly use this method, rather use @ref omni::core::ObjectPtr, which will manage calling @ref
    //! omni::core::IObject::acquire() and @ref omni::core::IObject::release() for you.
    //!
    //! @thread_safety This method is thread safe.
    void acquire() noexcept;

    //! Decrements the objects reference count.
    //!
    //! Most implementations will destroy the object if the reference count reaches 0 (though this is not a
    //! requirement).
    //!
    //! Objects may have multiple reference counts (e.g. one per interface implemented).  As such, it is important that
    //! you call @ref omni::core::IObject::release() on the same pointer from which you called @ref
    //! omni::core::IObject::acquire().
    //!
    //! Do not directly use this method, rather use @ref omni::core::ObjectPtr, which will manage calling @ref
    //! omni::core::IObject::acquire() and @ref omni::core::IObject::release() for you.
    //!
    //! @thread_safety This method is thread safe.
    void release() noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline trampolines: each public method simply forwards to the corresponding raw ABI entry point.

inline void* omni::core::Generated<omni::core::IObject_abi>::cast(omni::core::TypeId id) noexcept
{
    return cast_abi(id);
}

inline void omni::core::Generated<omni::core::IObject_abi>::acquire() noexcept
{
    acquire_abi();
}

inline void omni::core::Generated<omni::core::IObject_abi>::release() noexcept
{
    release_abi();
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/core/ReplaceCarbAssert.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // NOTE: This comment is left for historical purposes, but is no longer accurate. The `g_carbAssert` global variable is // weakly-linked now, so it no longer requires linking against carb.dll. #if 0 // Include this file (near the top of your includes list) when compiling a DLL which does not depend on any Carbonite // interfaces. // // This file solves the following problem: some inline code in carb::extras uses CARB_ASSERT, which causes a dependency // on g_carbAssert. When compiling code as a DLL for implicit linking (i.e. not a module/plugin), the linker will not // be able to find g_carbAssert. The DLL can define g_carbAssert, but no one is likely to set it to a valid value. The // result is a crash. // // This file redefines the CARB_ASSERT macros to the OMNI_ASSERT macros. The OMNI_ASSERT macros do not depend on global // variables. # define CARB_ASSERT OMNI_ASSERT # define CARB_ASSERT_ENABLED OMNI_ASSERT_ENABLED # define CARB_CHECK OMNI_CHECK # define CARB_CHECK_ENABLED OMNI_CHECK_ENABLED # define CARB_FATAL_UNLESS OMNI_FATAL_UNLESS #endif #include "../../carb/Defines.h" #include "Assert.h"
omniverse-code/kit/include/omni/core/OmniInit.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Core header for starting the Omniverse core.
#pragma once

#include "Omni.h"
#include "../../carb/ClientUtils.h"
#include "../../carb/StartupUtils.h"

//! Initializes the omni library along with Carbonite.  Ensures that both libraries will be cleaned up upon exit.
//!
//! This macro should be used in `main()`.  It creates some objects which will release the framework when they go out of
//! scope.
//!
//! Use this macro in conjunction with @ref OMNI_APP_GLOBALS().
//!
//! For startup, this function calls \ref carb::acquireFrameworkAndRegisterBuiltins() and \ref carb::startupFramework().
//! At a high level, these functions:
//!
//! - Determines application path from CLI args and env vars (see @ref carb::extras::getAppPathAndName()).
//! - Sets application path as filesystem root.
//! - Loads plugins for settings: *carb.settings.plugin*, *carb.dictionary.plugin*, *carb.tokens.plugins* and any
//!   serializer plugin.
//! - Searches for config file, loads it and applies CLI args overrides.
//! - Configures logging with config file.
//! - Loads plugins according to config file.
//! - Configures default plugins according to config file.
//! - Starts the default profiler (if loaded).
//!
//! @param ... May be either \a empty (default initialization), `argc, argv` (command-line arguments), or a const-
//!            reference to a \ref carb::StartupFrameworkDesc.
#define OMNI_CORE_INIT(...)                                                                                            \
    omni::core::ScopedOmniCore scopedOmniverse_;                                                                       \
    omni::core::ScopedFrameworkStartup scopedFrameworkStartup_{ __VA_ARGS__ };

namespace omni
{
namespace core
{

//! Scoped object which calls @ref OMNI_CORE_START() and @ref OMNI_CORE_STOP().
//!
//! Rather than directly using this object, use @ref OMNI_CORE_INIT().
struct ScopedOmniCore
{
    //! Starts the Carbonite @ref carb::Framework and calls @ref omniCoreStart.
    ScopedOmniCore(const OmniCoreStartArgs* args = nullptr)
    {
        // Only acquire the framework if no one else has already done so for this process.
        if (!carb::getFramework())
        {
            carb::acquireFrameworkAndRegisterBuiltins(args);
        }
    }

    //! Calls @ref omniCoreStop and tears down the Carbonite @ref carb::Framework.
    ~ScopedOmniCore()
    {
        carb::releaseFrameworkAndDeregisterBuiltins();
    }

private:
    CARB_PREVENT_COPY_AND_MOVE(ScopedOmniCore);
};

//! Scoped object which calls @ref carb::startupFramework() and @ref carb::shutdownFramework().
//!
//! Rather than directly using this object, use @ref OMNI_CORE_INIT().
struct ScopedFrameworkStartup
{
    //! Default constructor which does not startup the framework due to a lack of arguments.
    //!
    //! This constructor is present to make @ref OMNI_CORE_INIT() useful when the application wishes to call @ref
    //! carb::startupFramework() explicitly.
    ScopedFrameworkStartup() : m_startedFramework{ false }
    {
    }

    //! Constructor which passes @p argc and @p argv to @ref carb::startupFramework().
    //!
    //! All other parameters passed to @ref carb::startupFramework() are default values.
    ScopedFrameworkStartup(int argc, char** argv) : m_startedFramework{ true }
    {
        carb::StartupFrameworkDesc startupParams = carb::StartupFrameworkDesc::getDefault();
        startupParams.argv = argv;
        startupParams.argc = argc;
        carb::startupFramework(startupParams);
    }

    //! Constructor which allows specifying all parameters to @ref carb::startupFramework().
    ScopedFrameworkStartup(const carb::StartupFrameworkDesc& startupParams) : m_startedFramework{ true }
    {
        carb::startupFramework(startupParams);
    }

    //! Calls @ref carb::shutdownFramework() if a non-default constructor was called.
    ~ScopedFrameworkStartup()
    {
        // The default constructor never started the framework, so it must not shut it down either.
        if (m_startedFramework)
        {
            carb::shutdownFramework();
        }
    }

private:
    CARB_PREVENT_COPY_AND_MOVE(ScopedFrameworkStartup);

private:
    bool m_startedFramework; // true only when this object was the one to start the framework
};

} // namespace core
} // namespace omni
omniverse-code/kit/include/omni/core/IWeakObject.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/OmniAttr.h>
#include <omni/core/Interface.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <utility>
#include <type_traits>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Control block to maintain weak and strong reference counts for an object.
//!
//! The @ref IWeakObject interface supports the notion of "weak pointers".  Unlike "strong pointers" (e.g. @ref
//! ObjectPtr) weak pointers do not affect the pointee's reference count.  While this sounds like a raw pointer (and
//! possibly a bad idea), the magic of a weak pointer is that if the pointee's reference count goes to zero, the weak
//! pointer updates its internal pointer to `nullptr`.
//!
//! @ref IWeakObjectControlBlock is an ABI-safe object used to store a pointer to both the object and the object's
//! reference count (i.e. the "strong count").  This object additionally stores a "weak count", which is a count of
//! objects pointing to the @ref IWeakObjectControlBlock.
//!
//! Both @ref WeakPtr and @ref IWeakObject affect the "weak count".
//!
//! Only @ref ObjectPtr will affect the "strong count".
//!
//! Direct usage of this object should be avoided.  See @ref WeakPtr to learn how weak pointers are used in practice.
//!
//! **Advanced: Design Considerations**
//!
//! The design of ONI's weak pointers takes three main design considerations into account:
//!
//! - The user API should work similar to <a href="https://en.cppreference.com/w/cpp/memory/weak_ptr">std::weak_ptr</a>.
//!
//! - Enabling weak pointer support for an object should not tank performance in hot code paths.
//!
//! - Weak pointers must be able to point to objects whose DLL has been unloaded from memory.
//!
//! Above, the final point has a strong effect on the implementation of weak pointers.  In particular, this object (i.e.
//! @ref IWeakObjectControlBlock).  Consider:
//!
//! - For a virtual function to be called successfully, the code implementing the virtual function must still be loaded.
//!
//! - An @ref IWeakObjectControlBlock may outlive the DLL that created the object to which it points.
//!
//! Rather than exposing a raw struct with the weak and strong counts (and associated inline code to manipulate them),
//! this interface is used to hide both the counts and the manipulation logic.  However, this introduces virtual
//! functions, which could potentially be unloaded.  To address the unloading problem, *carb.dll* provides
//! `omni::core::getOrCreateWeakObjectControlBlock()`.  This C-ABI returns an implementation of @ref
//! IWeakObjectControlBlock implemented within *carb.dll*.  This effectively avoids the DLL unloading problem, since
//! *carb.dll* is considered a core dependency that cannot be unloaded and therefore the virtual function
//! implementations for @ref IWeakObjectControlBlock will always be loaded.
template <>
class omni::core::Generated<omni::core::IWeakObjectControlBlock_abi> : public omni::core::IWeakObjectControlBlock_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::core::IWeakObjectControlBlock")

    //! Returns a pointer to the object pointed to by this control block.  May return `nullptr`.
    //!
    //! If the object pointed to by this control block has a strong reference count of zero, `nullptr` is returned.
    //! Otherwise, @ref IObject::acquire() is called on the object before being returned.
    //!
    //! @thread_safety This method is thread safe.
    omni::core::ObjectPtr<omni::core::IObject> getObject() noexcept;
};

//! Interface defining a contract for objects which support "weak"/non-owning references.
//!
//! This interface works tightly with @ref WeakPtr to implement weak pointers.  Users of weak pointers should focus on
//! @ref WeakPtr rather than this interface, as this interface is an implementation detail of the weak pointer ABI.
//!
//! Developers wishing to add weak pointer support to their objects must implement this interface, which is a
//! non-trivial task.  A default implementation is provided in @ref ImplementsWeak.
template <>
class omni::core::Generated<omni::core::IWeakObject_abi> : public omni::core::IWeakObject_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::core::IWeakObject")

    //! Returns a control block containing reference count information needed for the implementation of weak pointers.
    //!
    //! Users of weak pointers must never call this method.  Rather, they should focus on exclusively using @ref
    //! WeakPtr.
    //!
    //! Implementers of this method are encouraged to use the implementation found in @ref omni::core::ImplementsWeak.
    //!
    //! The returned pointer is never `nullptr`.
    //!
    //! The returned pointer will have @ref IObject::acquire() called on it before being returned.
    //!
    //! @thread_safety This method is thread safe.
    omni::core::ObjectPtr<omni::core::IWeakObjectControlBlock> getWeakObjectControlBlock() noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline trampolines: forward to the raw ABI entry points and wrap the acquired pointer
// in an ObjectPtr via steal() (the _abi call has already done the acquire()).

inline omni::core::ObjectPtr<omni::core::IObject> omni::core::Generated<omni::core::IWeakObjectControlBlock_abi>::getObject() noexcept
{
    return omni::core::steal(getObject_abi());
}

inline omni::core::ObjectPtr<omni::core::IWeakObjectControlBlock> omni::core::Generated<
    omni::core::IWeakObject_abi>::getWeakObjectControlBlock() noexcept
{
    return omni::core::steal(getWeakObjectControlBlock_abi());
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/core/Result.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! \file
//!
//! \brief Result codes form the basics of error handling.
#pragma once

#include "../../carb/Defines.h"

#include "OmniAttr.h"

#include <cstdint>

namespace omni
{
namespace core
{

//! \{

//! Error code for the result of an operation.
//!
//! The numeric encoding for values follows Microsoft's
//! <a
//! href="https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/0642cb2f-2075-4469-918c-4441e69c548a">
//! HRESULT</a> scheme.  Many values are direct copies of those from the Windows API, such as \c kResultNotImplemented.
//! Codes which are NVIDIA-provided, will have the mask \c 0xa4310000.  This comes from setting the "customer bit" (bit
//! at most-significant index 2) and having a "facility" (bits from index 5-15) of \c 0b10000110001 aka \c 0x431 (which
//! is \c "NVDA" in Morse Code).
using Result OMNI_ATTR("constant, prefix=kResult") = std::int32_t;

//! Returns \c true if the given \ref omni::core::Result is not a failure code.
//!
//! `true` will be returned not only if the given result is \ref omni::core::kResultSuccess, but any other \ref
//! omni::core::Result that is not a failure code (such as warning \ref omni::core::Result codes).
#define OMNI_SUCCEEDED(x_) ((x_) >= 0)

//! Returns `true` if the given @ref omni::core::Result is a failure code.
#define OMNI_FAILED(x_) ((x_) < 0)

//! If the given \ref omni::core::Result is a failure code, calls `return result` to exit the current function.
#define OMNI_RETURN_IF_FAILED(x_)                                                                                      \
    do                                                                                                                 \
    {                                                                                                                  \
        auto result = (x_);                                                                                            \
        if (OMNI_FAILED(result))                                                                                       \
        {                                                                                                              \
            return result;                                                                                             \
        }                                                                                                              \
    } while (0)

//! Operation successful.  No error occurred.
constexpr Result kResultSuccess = 0;

//! The feature or method was not implemented.  It might be at some point in the future.
//!
//! * POSIX: \c ENOSYS
//! * Windows: \c E_NOTIMPL
//! * Decimal Value: -2147467263
constexpr Result kResultNotImplemented = 0x80004001;

//! The operation was aborted.
//!
//! * Windows: \c E_ABORT
//! * Decimal Value: -2147467260
constexpr Result kResultOperationAborted = 0x80004004;

//! The operation failed.
//! * Decimal Value: -2147467259
constexpr Result kResultFail = 0x80004005;

//! The item was not found.
//! * Decimal Value: -2147024894
constexpr Result kResultNotFound = 0x80070002;

//! Access has been denied for this operation.
//!
//! * POSIX: \c EACCES
//! * Windows: \c E_ACCESSDENIED
//! * Decimal Value: -2147024891
constexpr Result kResultAccessDenied = 0x80070005;

//! A system is out of memory.  This does not necessarily mean resident memory has been exhausted (although it can),
//! as this code can be used to special conditions such as exhausting graphics memory or running out of a specific
//! memory pool.  It can also indicate that an allocation would have been too big and failed ahead of time.
//!
//! * POSIX: \c ENOMEM
//! * Windows: \c E_OUTOFMEMORY
//! * Decimal Value: -2147024882
constexpr Result kResultOutOfMemory = 0x8007000E;

//! The operation is not supported.
//! * Decimal Value: -2147024846
constexpr Result kResultNotSupported = 0x80070032;

//! One or more of the arguments passed to a given function was invalid.
//!
//! * POSIX: \c EINVAL
//! * Windows: \c E_INVALIDARG
//! * Decimal Value: -2147024809
constexpr Result kResultInvalidArgument = 0x80070057;

//! The system is in an invalid state to perform the operation.  This is distinct from \c kResultInvalidOperation in that
//! it covers situations like "system is not yet started" or "file is closed."
//! * Decimal Value: -2147024892
constexpr Result kResultInvalidState = 0x80070004;

//! Version check failure.
//! * Decimal Value: -2147024253
constexpr Result kResultVersionCheckFailure = 0x80070283;

//! Failed to parse the version.
//! * Decimal Value: -2147024119
constexpr Result kResultVersionParseError = 0x80070309;

//! Insufficient buffer.
//! * Decimal Value: -2147024774
constexpr Result kResultInsufficientBuffer = 0x8007007A;

//! Try the operation again.  This is typically emitted in situations where an operation would require blocking, but the
//! system is configured to be non-blocking.  For example, attempting to read from a TCP socket when no data has been
//! received would return \c kResultTryAgain.
//!
//! * POSIX: \c EAGAIN, \c EWOULDBLOCK
//! * Windows: \c WMI_TRY_AGAIN
//! * Decimal Value: -2147020693
constexpr Result kResultTryAgain = 0x8007106B;

//! An operation was interrupted.  An "interruption" happens in cases where the operation did not complete successfully
//! due to an outside system (such as a timer) interrupting it.  For example, a function `Result wait_for(duration d)`
//! might give \c kResultSuccess when function returns because the duration expired and \c kResultInterrupted if the
//! system is shutting down.
//!
//! * POSIX: \c EINTR
//! * Windows: \c WSAEINTR
//! * Decimal Value: -1540292607
constexpr Result kResultInterrupted = 0xa4310001;

//! Interface not implemented.
//! * Decimal Value: -2147467262
constexpr Result kResultNoInterface = 0x80004002;

//! Pointer is null.
//!
//! * POSIX: covered by \c EINVAL
//! * Decimal Value: -2147467261
constexpr Result kResultNullPointer = 0x80004003;

//! Object already exists.
//!
//! * POSIX: \c EEXIST or \c EBUSY
//! * Decimal Value: -2147286960
constexpr Result kResultAlreadyExists = 0x80030050;

//! The operation was not valid for the target.  For example, attempting to perform a write operation on a read-only file
//! would result in this error.
//!
//! * POSIX: \c EPERM
//! * Decimal Value: -2147020579
constexpr Result kResultInvalidOperation = 0x800710DD;

//! No more items to return.  This is meant for things like reader queues when they have run out of data and will never
//! have more data.  For cases where something like an async queue being temporarily empty, use \c kResultTryAgain.
//! * Decimal Value: -2146893782
constexpr Result kResultNoMoreItems = 0x8009002A;

//! Invalid index.
//!
//! * POSIX: covered by \c EINVAL or \c ENOENT, depending on the situation
//! * Decimal Value: -2146889720
constexpr Result kResultInvalidIndex = 0x80091008;

//! Not enough data.
//! * Decimal Value: -2144796415
constexpr Result kResultNotEnoughData = 0x80290101;

//! Too much data.
//! * Decimal Value: -2144796414
constexpr Result kResultTooMuchData = 0x80290102;

//! Invalid data type.  This is used in cases where a specific type of data is requested, but that is not the data which
//! the receiver has.
//! * Decimal Value: -2144272373
constexpr Result kResultInvalidDataType = 0x8031000B;

//! Invalid data size.  This arises when the correct type of data is requested, but the requester believes the data size
//! is different from the receiver.  The cause of this is typically a version mismatch.
//! * Decimal Value: -2144272372
constexpr Result kResultInvalidDataSize = 0x8031000C;

//! \}

//! \cond DEV

// clang-format off
//! The list of all result codes as a higher-order macro.  The provided \c item_ should accept three parameters:
//!
//! * \c symbol -- the PascalCase version of the symbol; e.g.: \c AlreadyExists.  Note that this includes neither the
//!   \c kResult prefix nor the \c omni::core namespace qualifier, so you need to paste those yourself if desired.
//! * \c snake_symbol -- the snake_case version of the symbol; e.g.: \c try_again
//! * \c message -- a string literal of the associated message for the error; e.g.: `"access denied"`
#define OMNI_RESULT_CODE_LIST(item_)                                                        \
    /* (symbol,              snek_symbol,            message) */                            \
    item_(Success,             success,               "operation succeeded")                \
    item_(NotImplemented,      not_implemented,       "not implemented")                    \
    item_(OperationAborted,    operation_aborted,     "aborted")                            \
    item_(Fail,                fail,                  "failure")                            \
    item_(NotFound,            not_found,             "not found")                          \
    item_(AccessDenied,        access_denied,         "access denied")                      \
    item_(OutOfMemory,         out_of_memory,         "out of memory")                      \
    item_(NotSupported,        not_supported,         "not supported")                      \
    item_(InvalidArgument,     invalid_argument,      "invalid argument")                   \
    item_(InvalidState,        invalid_state,         "invalid state")                      \
    item_(VersionCheckFailure, version_check_failure, "version check failure")              \
    item_(VersionParseError,   version_parse_error,   "version parse error")                \
    item_(InsufficientBuffer,  insufficient_buffer,   "insufficient buffer")                \
    item_(TryAgain,            try_again,             "try again")                          \
    item_(Interrupted,         interrupted,           "interrupted")                        \
    item_(NoInterface,         no_interface,          "no interface")                       \
    item_(NullPointer,         null_pointer,          "null pointer")                       \
    item_(AlreadyExists,       already_exists,        "already exists")                     \
    item_(InvalidOperation,    invalid_operation,     "invalid operation")                  \
    item_(NoMoreItems,         no_more_items,         "no more items")                      \
    item_(InvalidIndex,        invalid_index,         "invalid index")                      \
    item_(NotEnoughData,       not_enough_data,       "not enough data")                    \
    item_(TooMuchData,         too_much_data,         "too much data")                      \
    item_(InvalidDataType,     invalid_data_type,     "invalid data type")                  \
    item_(InvalidDataSize,     invalid_data_size,     "invalid data size")
// clang-format on

//! \endcond

} // namespace core
} // namespace omni
omniverse-code/kit/include/omni/core/ModuleInfo.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//! @brief Helper functions for collecting module information.
#pragma once

#include "../../carb/extras/Library.h"
#include "../../carb/Interface.h"
#include "IObject.h"

namespace omni
{
namespace core
{

//! Given an object, returns the name of the module (.dll/.exe) which contains the object's code.
//!
//! @param[in] obj The object to retrieve the module name for. This may not be `nullptr`.
//! @returns The name and path of the library that the given object's owning implementation
//!          comes from. Returns an empty string if the object isn't bound to any particular
//!          library.
inline std::string getModuleFilename(omni::core::IObject* obj)
{
    // getLibraryFilename maps an address to a library.
    //
    // The first entry in IObject is the vtable pointer on both Windows and Linux (assumes the
    // MSVC/Itanium C++ ABI vtable-pointer-first object layout). Here we use the first virtual
    // function's address as the address to hand to getLibraryFilename: any code address inside
    // the implementing module identifies that module.
    void** vtbl = *reinterpret_cast<void***>(obj);
    return carb::extras::getLibraryFilename(vtbl[0]);
}

} // namespace core
} // namespace omni

//! Provides a list of dependent interfaces for an ONI plugin.
//!
//! @param[in] ... The list of fully qualified interface names that this plugin depends on. This
//!                should include any interfaces, Carbonite or ONI, that this plugin will attempt
//!                to acquire or create. This allows the Carbonite framework to verify that all
//!                dependent modules or interfaces are available for a plugin when attempting to
//!                load it, and allows for a more correct shutdown/unload order for plugins.
#define OMNI_PLUGIN_IMPL_DEPS(...)                                                                                     \
    /* helper: expands the interface list into an array of InterfaceDesc entries */                                    \
    template <typename... Types>                                                                                       \
    static void getPluginDepsTyped(struct carb::InterfaceDesc** deps, size_t* depsCount)                               \
    {                                                                                                                  \
        static carb::InterfaceDesc depends[] = { Types::getInterfaceDesc()... };                                       \
        *deps = depends;                                                                                               \
        *depsCount = sizeof...(Types);                                                                                 \
    }                                                                                                                  \
                                                                                                                       \
    omni::core::Result omniGetDependencies(carb::InterfaceDesc** deps, size_t* depsCount)                              \
    {                                                                                                                  \
        getPluginDepsTyped<__VA_ARGS__>(deps, depsCount);                                                              \
        return omni::core::kResultSuccess;                                                                             \
    }

//! Declares that the calling plugin has no dependencies on any other Carbonite or ONI interfaces.
//!
//! @remarks This lets the Carbonite plugin manager know that the calling plugin does not
//!          expect to attempt to acquire any other Carbonite interfaces or create any other
//!          ONI objects. This helps the plugin manager determine the most correct unload or
//!          shutdown order for all plugins.
#define OMNI_PLUGIN_IMPL_NODEPS()                                                                                      \
    omni::core::Result omniGetDependencies(carb::InterfaceDesc** deps, size_t* depsCount)                              \
    {                                                                                                                  \
        *deps = nullptr;                                                                                               \
        *depsCount = 0;                                                                                                \
        return omni::core::kResultSuccess;                                                                             \
    }
omniverse-code/kit/include/omni/core/ITypeFactory.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! @brief Provides the ITypeFactory interface declaration. #pragma once #include "../../carb/Defines.h" #include "IObject.h" #include "ModuleExports.h" #include "Omni.h" #include "Result.h" #include <cstdint> #include <vector> namespace omni { namespace core { //! Forward declares that the ITypeFactory interface is present. OMNI_DECLARE_INTERFACE(ITypeFactory); //! Function called by ITypeFactory to instantiate an implementation. //! //! This "creation" function is one of the core principles behind @rstref{ABI safety <abi-compatibility>}. By calling //! this simple function, we're able to instantiate a complex implementation of an Omniverse interface. All of the //! details to instantiate this implementation are hidden behind this function. Since this function returns a pointer //! to an interface (IObject), the caller is not exposed to any of the implementation details needed to instantiate the //! interface. using InterfaceImplementationCreateFn = IObject*(); //! Describes a mapping from a chunk of code (i.e. implementation) to one or more interfaces. //! //! Implementation are concrete classes that implement one or more interfaces. //! //! This data structure is essential to the Omniverse type system, as it maps type names (i.e. strings) to chunks of //! code that can instantiate those types. With this, the Omniverse type system is able to map interface type names to //! implementations and implementation type names to specific implementations. struct OMNI_ATTR("no_py") InterfaceImplementation { //! 
Name of the implementation. This must not be the name of an interface. const char* name; //! Function that instantiates the implementation. //! //! This function can be called concurrently on multiple threads InterfaceImplementationCreateFn* createFn; //! Implementations have versions. By default, ITypeFactory will pick the implementation with the highest version //! number. This behavior can be overridden (see ITypeFactory). //! //! This version number is not an "interface" version number. Interfaces are not versioned. Implementations, //! however, can be versioned. An implementation's version number is used by ITypeFactory to pick the best //! implementation when instantiating an interface. uint32_t version; //! List of interfaces, that when requested to be instantiated by ITypeFactory (e.g. omni::core::createType()), //! should instantiate this implementation. Not all implemented interfaces should be listed here, only those //! interfaces you wish to instantiate via omni::core::createType(). //! //! Said differently, this is a list of interfaces, that when invoked with omni::core::createType(), should //! instantiate this implementation. //! //! Which interfaces should be listed here is subtle topic. See @rstref{omniverse-native-interfaces} for //! more details. const char** interfacesImplemented; //! Number of interfaces implemented (size of interfacesImplemented). Pro-tip: Use //! CARB_COUNTOF32(interfacesImplemented). uint32_t interfacesImplementedCount; }; //! Base type for the flags used when registering plugins or implementations with the type //! factory. These are used to modify how the plugin or implementation is registered. No //! flags are currently defined. These flags will all have the prefix `fTypeFactoryFlag`. using TypeFactoryLoadFlags OMNI_ATTR("flag, prefix=fTypeFactoryFlag") = uint32_t; //! Flag to indicate that no special change in behavior should be used when registering //! a plugin or implementation. 
constexpr TypeFactoryLoadFlags fTypeFactoryFlagNone = 0x0; //! A mapping from type id's to implementations. //! //! This object maps type id's to concrete implementations. The type id's can represent interface ids or implementation //! ids. //! //! Register types with registerInterfaceImplementationsFromModule() and registerInterfaceImplementations(). //! //! Instantiate types with omni::core::createType(). This is the primary way Omniverse applications are able to //! instantiate concrete implementations of @rstref{ABI-safe <abi-compatibility>} interfaces. See //! omni::core::createType() for a helpful wrapper around omni::core::ITypeFactory::createType(). //! //! In practice, there will be a single ITypeFactory active in the process space (accessible via //! omniGetTypeFactoryWithoutAcquire()). However, @ref omni::core::ITypeFactory is not inherently a singleton, and as //! such multiple instantiations of the interface may exists. This can be used to create private type trees. //! //! Unless otherwise noted, all methods in this interface are thread safe. class ITypeFactory_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.core.ITypeFactory")> { protected: //! Instantiates a concrete type. //! //! The given type id can be an interface or implementation id. //! //! If the id is an interface id, the following rules are followed: //! //! - If the application specified a default implementation, that implementation will be instantiated. //! //! - Otherwise, the first registered implementation of the interface is instantiated. If multiple versions of the //! implementation exist, the highest version is picked. //! //! - implVersion must be 0 since interfaces are not versioned (only implementations are versioned). If implVersion //! is not 0, nullptr is returned. //! //! - If a default module name was provided by the app, the rules above will only be applied to implementations from //! the specified default module. //! //! 
If the id is an implementation id, the followings rules apply: //! //! - If version is 0, the highest version of the implementation is returned. //! //! - If version is not 0, the returned object is the specified version of the implementation. If such a version //! does not exists, nullptr is returned. If multiple implementations exists with the same version, the //! implementation registered first is instantiated. //! //! In both cases above, if moduleName given, the rules above are followed by only looking at implementations from //! the specified module. If no match is found, nullptr is returned. //! //! If moduleName has not been loaded, it will be loaded and its implementations registered. //! //! If moduleName is nullptr, the rules above are applied across all loaded modules. //! //! This method is thread safe. virtual IObject* OMNI_ATTR("no_py") createType_abi(TypeId id, OMNI_ATTR("c_str") const char* moduleName, uint32_t implVersion) noexcept = 0; //! Registers types from the given module. //! //! If the module is currently loaded, it will not be reloaded and kResultSuccess is returned. //! //! Modules (e.g. .dll or .so) may contain one or many implementations of one or many interfaces. When registering a //! module with the type factory, a function, whose name is described by 'kModuleGetExportsName', is found and //! invoked. Let's assume the exported function name is "omniModuleGetExports". //! //! "omniModuleGetExports" returns a key/value database of the module's capabilities and the module's requirements. //! Some things to note about this database: //! //! - The module's requirements can be marked as optional. //! //! - The module's capabilities can be ignored by ITypeFactory. //! //! These properties allow ITypeFactory and the module to find an intersection of desired functionality in a data //! driven manner. If one party's required needs are not met, the module fails to load (e.g. an appropriate //! omni::core::Result is returned). //! //! 
It is expected the module has entries in the key/value database describing the functions ITypeFactory should //! call during the loading process. The most important of these entries is the one defined by //! OMNI_MODULE_ON_MODULE_LOAD(), which points to the function ITypeFactory should call to get a list of //! implementations in the module. ITypeFactory invokes exports from the module in the following pattern: //! //! .--------------------------------------------------------------------------------------------------------------. //! | -> Time -> | //! |--------------------------------------------------------------------------------------------------------------| //! | omniModuleGetExports | onLoad (req.) | onStarted (optional) | onCanUnload (optional) | onUnload (optional) | //! | | | impl1->createFn | | | //! | | | impl2->createFn | | | //! | | | impl1->createFn | | | //! \--------------------------------------------------------------------------------------------------------------/ //! //! Above, functions in the same column can be called concurrently. It's up to the module to make sure such call //! patterns are thread safe within the module. //! //! onCanUnload and createFn can be called multiple times. All other functions are called once during the lifecycle //! of a module. //! //! \see omni/core/ModuleExports.h. //! \see onModuleLoadFn //! \see onModuleStartedFn //! \see onModuleCanUnloadFn //! \see onModuleUnloadFn //! //! //! The module can be explicitly unloaded with unregisterInterfaceImplementationsFromModule(). //! //! Upon destruction of this ITypeFactory, unregisterInterfaceImplementationsFromModule is called for each loaded //! module. If the ITypeFactory destructor's call to unregisterInterfaceImplementationsFromModule fails to safely //! unload a module (via the module's onModuleCanUnload and onModuleUnload), an attempt will be made to //! forcefully/unsafely unload the module. //! //! The given module name must not be nullptr. //! //! 
This method is thread safe. Modules can be loaded in parallel. //! //! \returns Returns kResultSuccess if the module is loaded (either due to this function or a previous call). //! Otherwise, an error is returned. virtual Result registerInterfaceImplementationsFromModule_abi(OMNI_ATTR("c_str, not_null") const char* moduleName, TypeFactoryLoadFlags flags) noexcept = 0; //! Unregisters all types registered from the given module. //! //! Unregistering a module may fail if the module does not belief it can safely be unloaded. This is determined by //! OMNI_MODULE_ON_MODULE_CAN_UNLOAD(). //! //! If unregistration does succeed, the given module will be unloaded from the process space. //! //! Upon destruction of this ITypeFactory, unregisterInterfaceImplementationsFromModule is called for each loaded //! module. If the ITypeFactory destructor's call to unregisterInterfaceImplementationsFromModule fails to safely //! unload a module (via the module's onModuleCanUnload and onModuleUnload), an attempt will be made to //! forcefully/unsafely unload the module. //! //! The given module name must not be nullptr. //! //! This method is thread safe. //! //! \returns Returns kResultSuccess if the module wasn't already loaded or if this method successfully unloaded the //! module. Return an error code otherwise. virtual Result unregisterInterfaceImplementationsFromModule_abi(OMNI_ATTR("c_str, not_null") const char* moduleName) noexcept = 0; //! Register the list of types. //! //! Needed data from the "implementations" list is copied by this method. //! //! This method is thread safe. virtual OMNI_ATTR("no_py") void registerInterfaceImplementations_abi( OMNI_ATTR("in, count=implementationsCount, not_null") const InterfaceImplementation* implementations, uint32_t implementationsCount, TypeFactoryLoadFlags flags) noexcept = 0; //! Maps a type id back to its type name. //! //! The memory returned is valid for the lifetime of ITypeFactory //! //! 
Returns nullptr if id has never been registered. Types that have been registered, and then unregistered, will //! still have a valid string returned from this method. //! //! This method is thread safe. virtual const char* getTypeIdName_abi(TypeId id) noexcept = 0; //! Sets the implementation matching constraints for the given interface id. //! //! See omni::core::ITypeFactory_abi::createType_abi() for how these constraints are used. //! //! moduleName can be nullptr. //! //! if implVersion is 0 and implId is an implementation id, the implementation with the highest version is chosen. //! //! This method is thread safe. virtual void setInterfaceDefaults_abi(TypeId interfaceId, TypeId implId, OMNI_ATTR("c_str") const char* moduleName, uint32_t implVersion) noexcept = 0; //! Returns the implementation matching constraints for the given interface id. //! //! See omni::core::ITypeFactory_abi::createType_abi() for how these constraints are used. //! //! If the given output implementation id pointer (outImplid) is not nullptr, it will be populated with the default //! implementation id instantiated when the interface requested to be created. //! //! If the given output implementation version pointer (outImplVersion) is not nullptr, it will be populated with //! the default implementation version instantiated when the interface is requested to be created. //! //! If the output module name pointer (outModuleName) is not nullptr, it will be populated with the name of the //! module searched when trying to find an implementation of the interface. If there is no current default module //! name, the output module name will be populated with the empty string. If the output module name's buffer size is //! insufficient to store the null terminated module name, kResultBufferInsufficient is returned and the module //! name's buffer size is updated with the needed buffer size. //! //! 
If the output module name is nullptr, the output module name buffer size (inOutModuleNameCount) will be //! populated with the size of the buffer needed to store the module name. //! //! The output module name buffer size pointer (inOutModuleNameCount) must not be nullptr. //! //! If the given interface id is not found, kResultNotFound is returned and the output implementation id (outImplId) //! and version (outImplVersion), if defined, are set to 0. Additionally, the output module name (outModuleName), //! if defined, is set to the empty string. //! //! If kResultInsufficientBuffer and kResultNotFound are both flagged internally, kResultNotFound is returned. //! //! See omni::core::getInterfaceDefaults() for a C++ wrapper to this method. //! //! This method is thread safe. virtual OMNI_ATTR("no_py") Result getInterfaceDefaults_abi(TypeId interfaceId, OMNI_ATTR("out") TypeId* outImplId, // nullptr accepted OMNI_ATTR("out" /*count=*inOutModuleNameCount*/) char* outModuleName, // nullptr // accepted OMNI_ATTR("in, out, not_null") uint32_t* inOutModuleNameCount, // must not be null OMNI_ATTR("out") uint32_t* outImplVersion) noexcept = 0; // nullptr accepted }; //! The version number of a @ref TypeFactoryArgs object being passed around. This is used to //! manage backward and forward compatibility checks when an implementation receives the //! object. Newer versions of a type factory implementation are expected to be able to handle //! the layout and content of any older version of this object. constexpr uint16_t kTypeFactoryArgsVersion = 1; //! Arguments passed to omniCreateTypeFactory(). class TypeFactoryArgs { public: //! Version of this structure. The version should be incremented only when removing/rearranging fields. Adding //! fields (from the reserved space) is allowed without incrementing the version. uint16_t version; //! Size of this structure in bytes. uint16_t byteCount; //! 
Four bytes of intentional padding to ensure the following pointers are appropriately aligned and to //! force the size of this object to a known expected value. uint8_t padding[4]; //! A pointer to the @ref omni::log::ILog implementation object that should be used by the core for all //! logging operations. This may be `nullptr` to use the default internal implementation. omni::log::ILog* log; //! A pointer to the @ref omni::structuredlog::IStructuredLog implementation object that should be //! used by the core for all structured logging operations. This may be `nullptr` to use the default //! implementation. omni::structuredlog::IStructuredLog* structuredLog; //! When adding fields, decrement this reserved space. Be mindful of alignment (explicitly add padding fields if //! needed). void* reserved[13]; TypeFactoryArgs() { std::memset(this, 0, sizeof(*this)); version = kTypeFactoryArgsVersion; byteCount = sizeof(*this); } //! Constructor: initializes a new object explicitly referencing the override objects to use. //! //! @param[in] log_ The @ref omni::log::ILog object to use for all operations that go through //! `ILog`. This may be `nullptr` to use the default internal implementation. //! @param[in] strucLog_ The @ref omni::structuredlog::IStructuredLog object to use for all operations //! that go through `IStructuredLog`. This may be `nullptr` to use the default //! implementation. //! @returns No Return value. //! //! @remarks This initializes a new object with specific override objects. There is currently no way //! to specify that one of the override objects should be disabled completely - if a `nullptr` //! object is passed in, the default implementation will be used instead. //! 
TypeFactoryArgs(omni::log::ILog* log_, omni::structuredlog::IStructuredLog* strucLog_) : TypeFactoryArgs() { log = log_; structuredLog = strucLog_; } }; CARB_ASSERT_INTEROP_SAFE(TypeFactoryArgs); static_assert((8 + 15 * sizeof(void*)) == sizeof(TypeFactoryArgs), "TypeFactoryArgs has an unexpected size"); } // namespace core } // namespace omni #include "ITypeFactory.gen.h" #ifdef OMNI_COMPILE_AS_DYNAMIC_LIBRARY OMNI_API omni::core::ITypeFactory* omniGetTypeFactoryWithoutAcquire(); #else //! Returns the global ITypeFactory. omni::core::IObject::acquire() is **not** called on the returned pointer. //! //! The global omni::core::ITypeFactory instance can be configured by passing an omni::core::ITypeFactory to //! omniCoreStart(). If an instance is not provided, omniCreateTypeFactory() is called. inline omni::core::ITypeFactory* omniGetTypeFactoryWithoutAcquire() { return static_cast<omni::core::ITypeFactory*>(omniGetBuiltInWithoutAcquire(OmniBuiltIn::eITypeFactory)); } #endif //! Creates a default implementation of ITypeFactory. //! //! The given TypeFactoryArgs pointer will only be accessed during this call. //! //! nullptr is accepted. OMNI_API omni::core::ITypeFactory* omniCreateTypeFactory(const omni::core::TypeFactoryArgs* args = nullptr); // clang-format off OMNI_DEFINE_INTERFACE_API(omni::core::ITypeFactory) { public: //! Instantiates an implementation of interface T. //! //! See omni::core::ITypeFactory_abi::createType_abi() for instantiation rules. template <typename T> inline ObjectPtr<T> createType(const char* moduleName = nullptr, uint32_t version = 0) noexcept { return createType<T>(T::kTypeId, moduleName, version); } //! Instantiates the given type and casts it to T. //! //! The given type id can be an implementation id. //! //! If the interface type T is not implemented by the type id, nullptr is returned. //! //! See omni::core::ITypeFactory_abi::createType_abi() for instantiation rules. 
template <typename T = IObject> inline ObjectPtr<T> createType(TypeId id, const char* moduleName = nullptr, uint32_t version = 0) noexcept { auto ptr = steal(createType_abi(id, moduleName, version)); return ptr.template as<T>(); } }; // clang-format on namespace omni { namespace core { //! Instantiates an implementation of interface T. //! //! See omni::core::ITypeFactory_abi::createType_abi() for instantiation rules. template <typename T> inline ObjectPtr<T> createType(const char* moduleName = nullptr, uint32_t version = 0) throw() { return omniGetTypeFactoryWithoutAcquire()->createType<T>(moduleName, version); } //! Instantiates the given type and casts it to T. //! //! The given type id can be an implementation id. //! //! If the interface type T is not implemented by the type id, nullptr is returned. //! //! \see omni::core::ITypeFactory_abi::createType_abi() for instantiation rules. template <typename T = IObject> inline ObjectPtr<T> createType(TypeId id, const char* moduleName = nullptr, uint32_t version = 0) CARB_NOEXCEPT { return omniGetTypeFactoryWithoutAcquire()->createType<T>(id, moduleName, version); } //! \see ITypeFactory::registerInterfaceImplementationsFromModule(). inline Result registerInterfaceImplementationsFromModule(const char* moduleName, // e.g. omni.scripting-python.dll TypeFactoryLoadFlags flags = 0) CARB_NOEXCEPT { return omniGetTypeFactoryWithoutAcquire()->registerInterfaceImplementationsFromModule(moduleName, flags); } //! \see ITypeFactory::registerInterfaceImplementations(). inline void registerInterfaceImplementations(const InterfaceImplementation* implementations, uint32_t implementationsCount, TypeFactoryLoadFlags flags = 0) CARB_NOEXCEPT { return omniGetTypeFactoryWithoutAcquire()->registerInterfaceImplementations( implementations, implementationsCount, flags); } //! \see ITypeFactory::getTypeIdName(). inline const char* getTypeIdName(TypeId id) CARB_NOEXCEPT { return omniGetTypeFactoryWithoutAcquire()->getTypeIdName(id); } //! 
\see ITypeFactory::setInterfaceDefaults(). template <typename T> inline void setInterfaceDefaults(TypeId implId, const char* moduleName, uint32_t implVersion) { omniGetTypeFactoryWithoutAcquire()->setInterfaceDefaults(T::kTypeId, implId, moduleName, implVersion); } //! Given an interface id (i.e. T), returns the preferred implementation (if any) instantiated when calling //! omni::core::ITypeFactory::createType(), the preferred module (if any) searched when calling //! omni::core::ITypeFactory::createType(), and the preferred implementation version number (if any) required //! when calling omni::core::ITypeFactory::createType(). //! //! \see omni::core::ITypeFactory::getInterfaceDefault(). //! //! Unlike the ABI method, this method returns kResultTryAgain if another thread is actively changing the interface //! defaults. This method internally retries multiple times to get the defaults, but will eventually give up with //! kResultTryAgain. //! //! Unlike the ABI method, kResultInsufficientBuffer is never returned. template <typename T> inline Result getInterfaceDefaults(TypeId* implId, std::string* moduleName, uint32_t* implVersion) { if (!moduleName) { uint32_t moduleNameCount = 0; Result result = omniGetTypeFactoryWithoutAcquire()->getInterfaceDefaults( T::kTypeId, implId, nullptr, &moduleNameCount, implVersion); return result; } else { // loop here in case the module name size changes between checking for the size and actually get the string. std::vector<char> buffer; for (unsigned i = 0; i < 4; ++i) { uint32_t moduleNameCount = uint32_t(buffer.size()); Result result = omniGetTypeFactoryWithoutAcquire()->getInterfaceDefaults( T::kTypeId, implId, buffer.data(), &moduleNameCount, implVersion); if (kResultInsufficientBuffer != result) { *moduleName = buffer.data(); return result; } else { buffer.resize(moduleNameCount); } } return kResultTryAgain; } } //! \see ITypeFactory::unregisterInterfaceImplementationsFromModule(). 
inline Result unregisterInterfaceImplementationsFromModule(const char* moduleName) CARB_NOEXCEPT { return omniGetTypeFactoryWithoutAcquire()->unregisterInterfaceImplementationsFromModule(moduleName); } } // namespace core } // namespace omni
omniverse-code/kit/include/omni/core/Result.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/OmniAttr.h> #include <omni/core/Interface.h> #include <omni/core/ResultError.h> #include <functional> #include <utility> #include <type_traits> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/core/Assert.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//! @brief Helper macros to provide assertion checking macros.
#pragma once

#include "Platform.h"
#include "VariadicMacroUtils.h"

#include <array>
#include <cstdio>
#include <utility>

namespace omni
{
namespace core
{

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{

// Compile-time string length helpers: the length of a string is encoded in the returned
// std::integral_constant's type, so it can be used in constant expressions below.
template <size_t N>
constexpr std::integral_constant<size_t, N - 1> length(const char (&)[N])
{
    return {};
}

template <size_t N>
constexpr std::integral_constant<size_t, N - 1> length(std::array<char, N>)
{
    return {};
}

// LengthType<T>::value is the character count (excluding the terminator) of a string literal or std::array<char, N>.
template <typename T>
using LengthType = decltype(length(std::declval<T>()));

// Recursively copies `sz` characters from `in` into `out` starting at index `dst`.
// Recursion (rather than a loop) keeps this usable in C++11/14-style constexpr contexts.
template <typename ARRAY>
constexpr void constCopyTo(ARRAY& out, size_t dst, const char* in, size_t sz)
{
    if (sz)
    {
        out[dst] = *in;
        constCopyTo(out, dst + 1, in + 1, sz - 1);
    }
}

// Converts a built-in array to a std::array at compile time (element-wise via an index sequence).
template <class T, std::size_t N, std::size_t... I>
constexpr std::array<std::remove_cv_t<T>, N> constToArrayImpl(T (&a)[N], std::index_sequence<I...>)
{
    return { { a[I]... } };
}

template <class T, std::size_t N>
constexpr std::array<std::remove_cv_t<T>, N> constToArray(T (&a)[N])
{
    return constToArrayImpl(a, std::make_index_sequence<N>{});
}

// Uniform access to the first character of either a string literal or a std::array<char, N>.
template <typename T>
constexpr const char* toChars(T& s)
{
    return s;
}

template <typename T, size_t N>
constexpr const char* toChars(const std::array<T, N>& s)
{
    return &(s[0]);
}

// Compile-time concatenation of up to three strings into a null-terminated std::array<char, N>.
template <typename A, typename B, typename C>
constexpr std::array<char, LengthType<A>::value + LengthType<B>::value + LengthType<C>::value + 1> constConcat(
    const A& a, const B& b, const C& c)
{
    char o[LengthType<A>::value + LengthType<B>::value + LengthType<C>::value + 1]{};
    constCopyTo(o, 0, toChars(a), LengthType<A>::value);
    constCopyTo(o, LengthType<A>::value, toChars(b), LengthType<B>::value);
    constCopyTo(o, LengthType<A>::value + LengthType<B>::value, toChars(c), LengthType<C>::value);
    return constToArray(o);
}

template <typename A, typename B>
constexpr std::array<char, LengthType<A>::value + LengthType<B>::value + 1> constConcat(const A& a, const B& b)
{
    return constConcat(a, b, "");
}

template <typename A>
constexpr std::array<char, LengthType<A>::value + 1> constConcat(const A& a)
{
    return constConcat(a, "", "");
}

// Selects the lead-in format string: with a user-supplied message the prefix ends in ':',
// without one it ends in '.'.
template <bool I>
struct Assertion
{
    constexpr static char Message[] = "Assertion (%s) failed:";
};

template <>
struct Assertion<false>
{
    constexpr static char Message[] = "Assertion (%s) failed.";
};

// Overload resolution detects (at compile time) whether OMNI_FATAL_UNLESS was given a message argument.
constexpr bool hasArg(const char*)
{
    return true;
}

constexpr bool hasArg()
{
    return false;
}

} // namespace detail
#endif

} // namespace core
} // namespace omni

//! This macro is surprisingly complex mainly because it accepts a variable number of arguments. If a single argument is
//! given, a message in the following form is printed:
//!
//!   "Assertion (myCondition) failed.\n"
//!
//! If multiple arguments are given, the message becomes more dynamic:
//!
//!   "Assertion (var == 1) failed: var == 2\n"
//!
//! Where the latter part of the message "var == 2" is provided by the caller.
//!
//! So, if multiple arguments are given, this macro must:
//!
//! - End the first part of the message with a : instead of an .
//!
//! - Concatenate the fixed format string ("Assertion (%s)...") with the user supplied format message.
//!
//! - Add a newline.
//!
//! All of this at compile time.
//!
//! We use a couple of tricks to do this (all in portable C++).
//!
//! - Via the preprocessor, we can't detect if __VA_ARGS__ is empty (this is a preprocessor limitation). We can forward
//!   the __VA_ARGS__ to a constexpr (hasArg). hasArg is an overloaded constexpr that will return true if an argument
//!   was supplied. We can then use the result of this overload to select a template specialization (Assertion<>)
//!   containing a constexpr with our format string.
//!
//! - We need to concatenate the first part of the format string with the user provided portion and a newline. We can't
//!   do this with the preprocessor because the first part of the message is a constexpr, not a string literal. To get
//!   around this we use a constexpr (constConcat) to perform the concatenation.
//!
//! - The user format string may be empty. We use the preprocessor's string concatenation in
//!   OMNI_VA_FIRST_OR_EMPTY_STRING to make sure a second argument is passed to the constexpr string concatenation
//!   function.
//!
//! - When passing the __VA_ARGS__ to fprintf, we need to elide the first argument (since it's the user supplied format
//!   we already concatenated) and provide a comma if any additional arguments were given. While the preprocessor cannot
//!   detect if __VA_ARGS__ is empty, it can detect if at least two arguments are supplied. OMNI_VA_COMMA_WITHOUT_FIRST
//!   uses this to determine if a comma should be added and to elide the first user supplied argument (the user's
//!   format string).
//!
//! Some of this code can be simplified with C++20's __VA_OPT__.

//! Checks if the given condition is true, if not, the given optional message is printed to stdout and the program is
//! terminated.
//!
//! Use this macro when an unrecoverable situation has been detected.
#define OMNI_FATAL_UNLESS(cond_, ...)                                                                                  \
    do                                                                                                                 \
    {                                                                                                                  \
        if (!CARB_LIKELY(cond_))                                                                                       \
        {                                                                                                              \
            auto constexpr const failMsg_ = omni::core::detail::constConcat(                                           \
                omni::core::detail::Assertion<omni::core::detail::hasArg(OMNI_VA_FIRST(__VA_ARGS__))>::Message,        \
                OMNI_VA_FIRST_OR_EMPTY_STRING(__VA_ARGS__));                                                           \
            auto constexpr const fmt_ =                                                                                \
                omni::core::detail::constConcat(__FILE__ ":" CARB_STRINGIFY(__LINE__) ": ", failMsg_, "\n");           \
            std::fprintf(stderr, fmt_.data(), #cond_ OMNI_VA_COMMA_WITHOUT_FIRST(__VA_ARGS__));                        \
            OMNI_BREAK_POINT();                                                                                        \
        }                                                                                                              \
    } while (0)

//! Indicates whether runtime checking is enabled. For the time being this is always set to `1`
//! indicating that the default implementation should not be overridden. This may change in
//! the future.
#define OMNI_CHECK_ENABLED 1

//! Checks if the given condition is true, if not, the given optional message is printed to stdout and the program is
//! terminated.
//!
//! Unlike OMNI_ASSERT, this macro runs checks in release builds.
//!
//! Use this macro when you fail to provide adequate test coverage.
#define OMNI_CHECK OMNI_FATAL_UNLESS

#if CARB_DEBUG
//! Like std::assert. Basically OMNI_FATAL_UNLESS, but compiles to a no-op in release builds.
#    define OMNI_ASSERT(cond, ...) OMNI_FATAL_UNLESS(cond, __VA_ARGS__)

//! Set to 1 to indicate that assertion checks are enabled. Set to 0 if assertion checks will
//! just be ignored. This value will always be defined regardless of the current mode.
#    define OMNI_ASSERT_ENABLED 1
#else
//! Like std::assert. Basically OMNI_FATAL_UNLESS, but compiles to a no-op in release builds.
#    define OMNI_ASSERT(cond, ...) ((void)0)

//! Set to 1 to indicate that assertion checks are enabled. Set to 0 if assertion checks will
//! just be ignored. This value will always be defined regardless of the current mode.
#    define OMNI_ASSERT_ENABLED 0
#endif
omniverse-code/kit/include/omni/core/Platform.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! @brief Helper macros to detect the current platform. #pragma once #include "../../carb/Defines.h" //! Set to `1` if compiling a Windows build. Set to `0` otherwise. This symbol will //! always be defined even when not on a Windows build. It can thus be used to pass //! as parameters or in if-statements to modify behavior based on the platform. #define OMNI_PLATFORM_WINDOWS CARB_PLATFORM_WINDOWS //! Set to `1` if compiling a Linux build. Set to `0` otherwise. This symbol will //! always be defined even when not on a Linux build. It can thus be used to pass //! as parameters or in if-statements to modify behavior based on the platform. #define OMNI_PLATFORM_LINUX CARB_PLATFORM_LINUX //! Set to `1` if compiling a MacOS build. Set to `0` otherwise. This symbol will //! always be defined even when not on a MacOS build. It can thus be used to pass //! as parameters or in if-statements to modify behavior based on the platform. #define OMNI_PLATFORM_MACOS CARB_PLATFORM_MACOS /** @copydoc CARB_POSIX */ #define OMNI_POSIX CARB_POSIX #if OMNI_PLATFORM_LINUX || OMNI_PLATFORM_MACOS || defined(DOXYGEN_BUILD) //! Triggers a breakpoint. If no debugger is attached, the program terminates. # define OMNI_BREAK_POINT() ::raise(SIGTRAP) #elif OMNI_PLATFORM_WINDOWS //! Triggers a breakpoint. If no debugger is attached, the program terminates. # define OMNI_BREAK_POINT() ::__debugbreak() #else CARB_UNSUPPORTED_PLATFORM(); #endif
omniverse-code/kit/include/omni/core/Interface.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! @brief Helper functions for collecting module information. #pragma once #include "../../carb/Interface.h" //! Used to declare the interface description for an ONI object's API layer. //! //! @param[in] name The fully qualified name of the interface (as a string literal) //! that contains this call. This call must be made from the class //! scope for the interface's API layer. //! //! @note This does not need to be called directly if the `omni.bind` tool is being used //! to generate the API layer for an interface. The `omni.bind` tool will insert //! this call automatically. #define OMNI_PLUGIN_INTERFACE(name) \ /** \ * Returns information about this interface. Auto-generated by OMNI_PLUGIN_INTERFACE(). \ * @returns The carb::InterfaceDesc struct with information about this interface. \ */ \ static carb::InterfaceDesc getInterfaceDesc() \ { \ return carb::InterfaceDesc{ name, { 1, 0 } }; \ }
omniverse-code/kit/include/omni/core/BuiltIn.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Header file for Omni built-in interfaces. #pragma once #include "Api.h" //! Used by omniGetBuiltInWithoutAcquire() to specify the desired interface. //! //! @warning Do not use omniGetBuiltInWithoutAcquire() directly. Instead use the referenced inline function for the //! desired OmniBuiltIn enum value. enum class OmniBuiltIn { //! Returns a reference to ITypeFactory. Use omniGetTypeFactoryWithoutAcquire() inline function. eITypeFactory, //! Returns a reference to ILog. Use omniGetLogWithoutAcquire() inline function. eILog, //! Returns a reference to IStructuredLog. Use omniGetStructuredLogWithoutAcquire() inline function. eIStructuredLog, }; //! Returns a built-in interface based on the given parameter. //! //! @warning This function should not be used. Instead, use the specific inline function for the desired OmniBuiltIn. OMNI_API void* omniGetBuiltInWithoutAcquire(OmniBuiltIn);
omniverse-code/kit/include/omni/core/ResultError.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Helpers related reporting errors from @ref omni::core::Result. #pragma once #include "../../carb/extras/Debugging.h" #include "Assert.h" #include "IObject.h" #include <stdexcept> #include <string> namespace omni { namespace core { //! Given a @ref Result code, returns a human readable interpretation of the //! code. //! //! A valid pointer is always returned. The returned pointer is valid for the //! lifetime of the module from which the function was called. inline const char* resultToString(Result result) { #ifndef DOXYGEN_BUILD # define OMNI_RESULT_CODE_GEN_RESULT_TO_STRING_CASE(symbol_, snek_symbol_, message_) \ case kResult##symbol_: \ return message_; #endif switch (result) { OMNI_RESULT_CODE_LIST(OMNI_RESULT_CODE_GEN_RESULT_TO_STRING_CASE) default: return "unknown error"; } #undef OMNI_RESULT_CODE_GEN_RESULT_TO_STRING_CASE } //! Exception object that encapsulates a @ref Result along with a customizable //! message. class ResultError : public std::exception { public: //! Constructor. ResultError(Result result) : m_result{ result } { } //! Constructor with custom messages. ResultError(Result result, std::string msg) : m_result{ result }, m_msg(std::move(msg)) { } //! Returns a human readable description of the error. virtual const char* what() const noexcept override { if (m_msg.empty()) { return resultToString(m_result); } else { return m_msg.c_str(); } } //! Return the result code that describes the error. 
Result getResult() const noexcept { return m_result; } private: Result m_result; std::string m_msg; }; } // namespace core } // namespace omni #if CARB_DEBUG && !defined(DOXYGEN_BUILD) # define OMNI_RETURN_ERROR(e_) \ carb::extras::debuggerBreak(); \ return e_; #else //! Helper macro used to return a @ref omni::core::Result. When in debug mode //! and attached to a debugger, this macro will cause a debugger break. Useful //! for determining the origin of an error. # define OMNI_RETURN_ERROR(e_) return e_ #endif //! Helper macro to catch exceptions and return them as @ref omni::core::Result //! codes. Useful when writing ABI code. #define OMNI_CATCH_ABI_EXCEPTION() \ catch (const omni::core::ResultError& e_) \ { \ OMNI_RETURN_ERROR(e_.getResult()); \ } \ catch (...) \ { \ OMNI_RETURN_ERROR(omni::core::kResultFail); \ } //! Helper macro to convert a @ref omni::core::Result to a @ref //! omni::core::ResultError exception. Useful when authoring API code. Used //! heavily by *omni.bind*. #define OMNI_THROW_IF_FAILED(expr_) \ do \ { \ omni::core::Result result_ = (expr_); \ if (OMNI_FAILED(result_)) \ { \ throw omni::core::ResultError{ result_ }; \ } \ } while (0) //! Helper macro to return an appropriate @ref omni::core::Result when the given //! argument is @c nullptr. Useful when authoring ABI code. //! //! Note, use of this macro should be rare since *omni.bind* will check for @c //! nullptr arguments in the generated API code. #define OMNI_RETURN_IF_ARG_NULL(expr_) \ do \ { \ if (nullptr == expr_) \ { \ OMNI_RETURN_ERROR(omni::core::kResultInvalidArgument); \ } \ } while (0) //! Helper macro to throw a @ref omni::core::ResultError exception if a function argument is //! @c nullptr. Used heavily by *omni.bind*. 
//! The "file:line: argument 'name' must not be nullptr" message is assembled entirely at compile time via string
//! literal concatenation (__FILE__/__LINE__/#ptr_); the runtime check is just `!ptr_`, which also compiles for
//! smart-pointer arguments that provide a boolean conversion.
#define OMNI_THROW_IF_ARG_NULL(ptr_)                                                                                   \
    do                                                                                                                 \
    {                                                                                                                  \
        if (!ptr_)                                                                                                     \
        {                                                                                                              \
            auto constexpr const msg_ = __FILE__ ":" CARB_STRINGIFY(__LINE__) /*": " CARB_PRETTY_FUNCTION*/            \
                ": argument '" #ptr_ "' must not be nullptr";                                                          \
            throw omni::core::ResultError(omni::core::kResultInvalidArgument, msg_);                                   \
        }                                                                                                              \
    } while (0)
omniverse-code/kit/include/omni/core/IWeakObject.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IWeakObject.h
//!
//! @brief Defines @ref omni::core::IWeakObject.
#pragma once

#include <carb/cpp/Atomic.h> // atomic_ref
#include <carb/detail/DeferredLoad.h>
#include <omni/core/Api.h> // OMNI_API
#include <omni/core/Assert.h>
#include <omni/core/IObject.h>

#include <limits>

namespace omni
{
namespace core
{

class IWeakObject;
class IWeakObject_abi;
class IWeakObjectControlBlock;
class IWeakObjectControlBlock_abi;

//! Control block to maintain weak and strong reference counts for an object.
//!
//! The @ref IWeakObject interface supports the notion of "weak pointers". Unlike "strong pointers" (e.g. @ref
//! ObjectPtr) weak pointers do not affect the pointee's reference count. While this sounds like a raw pointer (and
//! possibly a bad idea), the magic of a weak pointer is that if the pointee's reference count goes to zero, the weak
//! pointer updates its internal pointer to `nullptr`.
//!
//! @ref IWeakObjectControlBlock is an ABI-safe object used to store a pointer to both the object and the object's
//! reference count (i.e. the "strong count"). This object additionally stores a "weak count", which is a count of
//! objects pointing to the @ref IWeakObjectControlBlock.
//!
//! Both @ref WeakPtr and @ref IWeakObject affect the "weak count".
//!
//! Only @ref ObjectPtr will affect the "strong count".
//!
//! Direct usage of this object should be avoided. See @ref WeakPtr to learn how weak pointers are used in practice.
//!
//! **Advanced: Design Considerations**
//!
//! The design of ONI's weak pointers takes three main design considerations into account:
//!
//! - The user API should work similar to <a href="https://en.cppreference.com/w/cpp/memory/weak_ptr">std::weak_ptr</a>.
//!
//! - Enabling weak pointer support for an object should not tank performance in hot code paths.
//!
//! - Weak pointers must be able to point to objects whose DLL has been unloaded from memory.
//!
//! Above, the final point has a strong effect on the implementation of weak pointers. In particular, this object (i.e.
//! @ref IWeakObjectControlBlock). Consider:
//!
//! - For a virtual function to be called successfully, the code implementing the virtual function must still be loaded.
//!
//! - An @ref IWeakObjectControlBlock may outlive the DLL that created the object to which it points.
//!
//! Rather than exposing a raw struct with the weak and strong counts (and associated inline code to manipulate them),
//! this interface is used to hide both the counts and the manipulation logic. However, this introduces virtual
//! functions, which could potentially be unloaded. To address the unloading problem, *carb.dll* provides
//! `omni::core::getOrCreateWeakObjectControlBlock()`. This C-ABI returns an implementation of @ref
//! IWeakObjectControlBlock implemented within *carb.dll*. This effectively avoids the DLL unloading problem, since
//! *carb.dll* is considered a core dependency that cannot be unloaded and therefore the virtual function
//! implementations for @ref IWeakObjectControlBlock will always be loaded.
class IWeakObjectControlBlock_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.core.IWeakObjectControlBlock")>
{
protected:
    //! Returns a pointer to the object pointed to by this control block. May return `nullptr`.
    //!
    //! If the object pointed to by this control block has a strong reference count of zero, `nullptr` is returned.
    //! Otherwise, @ref IObject::acquire() is called on the object before being returned.
    //!
    //! @thread_safety This method is thread safe.
    virtual IObject* getObject_abi() noexcept = 0;
};

//! Interface defining a contract for objects which support "weak"/non-owning references.
//!
//! This interface works tightly with @ref WeakPtr to implement weak pointers. Users of weak pointers should focus on
//! @ref WeakPtr rather than this interface, as this interface is an implementation detail of the weak pointer ABI.
//!
//! Developers wishing to add weak pointer support to their objects must implement this interface, which is a
//! non-trivial task. A default implementation is provided in @ref ImplementsWeak.
class IWeakObject_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.core.IWeakObject")>
{
protected:
    //! Returns a control block containing reference count information needed for the implementation of weak pointers.
    //!
    //! Users of weak pointers must never call this method. Rather, they should focus on exclusively using @ref
    //! WeakPtr.
    //!
    //! Implementers of this method are encouraged to use the implementation found in @ref omni::core::ImplementsWeak.
    //!
    //! The returned pointer is never `nullptr`.
    //!
    //! The returned pointer will have @ref IObject::acquire() called on it before being returned.
    //!
    //! @thread_safety This method is thread safe.
    virtual OMNI_ATTR("not_null") IWeakObjectControlBlock* getWeakObjectControlBlock_abi() noexcept = 0;
};

} // namespace core
} // namespace omni

#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/core/IWeakObject.gen.h>

namespace omni
{
namespace core
{

//! @copydoc omni::core::IWeakObjectControlBlock_abi
class IWeakObjectControlBlock : public omni::core::Generated<omni::core::IWeakObjectControlBlock_abi>
{
};

//! @copydoc omni::core::IWeakObject_abi
class IWeakObject : public omni::core::Generated<omni::core::IWeakObject_abi>
{
};

//!
Weak pointer to ONI objects much like <a href="https://en.cppreference.com/w/cpp/memory/weak_ptr">std::weak_ptr</a>. //! //! The @ref IWeakObject interface support the notion of "weak pointers". Unlike "strong pointers" (e.g. @ref //! ObjectPtr) weak pointers do not affect the pointee's reference count. While this sounds like a raw pointer (and //! possibly a bad idea), the magic of a weak pointer is that if the pointee's reference count goes to zero, the weak //! pointer updates its internal pointer to `nullptr`. //! //! Below are several practical use cases of weak pointers. //! //! **Breaking Reference Count Cycles** //! //! A reference count cycle happens when an object "A" contains an @ref ObjectPtr to object "B". At this same time, //! object "B" holds an @ref ObjectPtr to object "A". Since each object increments the other's reference count, neither //! is every destructed. To break this cycle, one of the objects can hold a @ref WeakPtr to the other object. //! //! **Pointing to Objects Whose Code May Be Unloaded From Memory** //! //! Carbonite supports the notion of plugins that can be loaded, unloaded, and reloaded at runtime. Often code from DLL //! *X* holds an @ref ObjectPtr to code from DLL *Y*. If the user unloads *Y*, and DLL *X* still wishes to use *Y*, the //! application is likely to crash when DLL *X* attempts to access the unloaded code. //! //! Instead of storing an @ref ObjectPtr, a @ref WeakPtr can be used instead. When DLL *X* wants to access the code in //! DLL *Y*, @ref WeakPtr::getObjectPtr() is called, which converts the @ref WeakPtr into an @ref ObjectPtr. Here, if //! the underlying object's strong reference count is zero, the returned @ref ObjectPtr will point to `nullptr`. DLL //! *X* simply must check if the @ref ObjectPtr points to `nullptr` before using the pointer. //! //! Above, we make the assumption that DLL *Y* will either: //! //! - Refuse to unload as long as an object it produced has a non-zero strong reference count. //! 
//! - Will cleanup all external @ref ObjectPtr objects that hold a reference to an object produced by the DLL. One way //! to implement this is for the plugin to allow callbacks to be registered with it that will be invoked when the DLL //! is about to unloaded. //! //! **Usage** //! //! Weak pointers should be used as follows: //! #ifdef CARB_DOC_BUILD //! @snippet "source/tests/test.unit/omni.core/TestWeakPtr.cpp" carb-docs-weakptr-example-use #endif template <typename T> class WeakPtr { public: //! Allow implicit conversion from nullptr to an WeakPtr. WeakPtr(std::nullptr_t = nullptr) noexcept { } //! Strong pointer to weak pointer conversion. WeakPtr(const omni::core::ObjectPtr<T>& strong) noexcept { if (strong) { m_ref = strong->getWeakObjectControlBlock(); } } //! Raw pointer to weak pointer conversion. WeakPtr(T* strong) noexcept { if (strong) { m_ref = strong->getWeakObjectControlBlock(); } } //! Copy constructor. WeakPtr(const WeakPtr& other) noexcept = default; //! Move constructor. WeakPtr(WeakPtr&& other) noexcept = default; ~WeakPtr() noexcept = default; //! Assignment operator. //! //! @thread_safety This method is not thread safe. WeakPtr& operator=(const WeakPtr& other) noexcept = default; //! Move assignment operator. //! //! @thread_safety This method is not thread safe. WeakPtr& operator=(WeakPtr&& other) noexcept = default; //! Returns an @ref omni::core::ObjectPtr to the object to which this weak pointer is pointing. //! //! The returned object will point to `nullptr` if there are no "strong" references to the underlying object. //! Otherwise, if a non-`nullptr` pointer is returned, the object will live at least as long as the returned @ref //! omni::core::ObjectPtr. //! //! To understand how/when to use weak pointers and this method, consult the class documentation for @ref //! omni::core::WeakPtr. //! //! Equivalent to @ref WeakPtr::lock(). //! //! @thread_safety This method is not thread safe. 
omni::core::ObjectPtr<T> getObjectPtr() const noexcept { if (m_ref) { return m_ref->getObject().template as<T>(); } return nullptr; } //! Returns an @ref omni::core::ObjectPtr to the object to which this weak pointer is pointing. //! //! The returned object will point to `nullptr` if there are no "strong" references to the underlying object. //! Otherwise, if a non-`nullptr` pointer is returned, the object will live at least as long as the returned @ref //! omni::core::ObjectPtr. //! //! To understand how/when to use weak pointers and this method, consult the class documentation for @ref //! omni::core::WeakPtr. //! //! Equivalent to @ref WeakPtr::getObjectPtr(). //! //! @thread_safety This method is not thread safe. omni::core::ObjectPtr<T> lock() const noexcept { if (m_ref) { return m_ref->getObject().template as<T>(); } return nullptr; } private: ObjectPtr<IWeakObjectControlBlock> m_ref; }; #ifndef DOXYGEN_BUILD namespace detail { enum class WeakObjectControlBlockOp { eIncrementStrong = 0, eDecrementStrong = 1, eDecrementWeak = 2, eGetStrongCount = 3, // for testing eGetWeakCount = 4, // for testing eHasControlBlock = 5, // for testing }; } // namespace detail #endif // DOXYGEN_BUILD } // namespace core } // namespace omni // carb.dll C-ABI to hide the implementation details of a weak object's control block. see IWeakObject's class docs for // motivation. 
#ifndef DOXYGEN_BUILD # if CARB_REQUIRE_LINKED OMNI_API omni::core::IWeakObjectControlBlock* omniWeakObjectGetOrCreateControlBlock(omni::core::IObject* obj, uintptr_t* refCountOrEncodedPtr); OMNI_API uint32_t omniWeakObjectControlBlockOp(uintptr_t* refCountOrEncodedPtr, omni::core::WeakObjectControlBlockOp op); # else OMNI_API omni::core::IWeakObjectControlBlock* omniWeakObjectGetOrCreateControlBlock(omni::core::IObject* obj, uintptr_t* refCountOrEncodedPtr) CARB_ATTRIBUTE(weak); OMNI_API uint32_t omniWeakObjectControlBlockOp(uintptr_t* refCountOrEncodedPtr, omni::core::detail::WeakObjectControlBlockOp op) CARB_ATTRIBUTE(weak); # endif #endif // DOXYGEN_BUILD namespace omni { namespace core { #ifndef DOXYGEN_BUILD namespace detail { CARB_DETAIL_DEFINE_DEFERRED_LOAD(loadWeakObjectGetOrCreateControlBlock, omniWeakObjectGetOrCreateControlBlock, (omni::core::IWeakObjectControlBlock * (*)(omni::core::IObject*, uintptr_t*))); //! Returns an implementation of @ref omni::core::IWeakObjectControlBlock provided by *carb.dll*. //! //! This method is an implementation detail and should not directly be called by users. //! //! The provided parameter, @p refCountOrPtr, is a pointer that points to a value. That value can represent either: //! //! - An object's reference count (i.e. strong count). //! //! - An **encoded** pointer to an @ref omni::core::IWeakObjectControlBlock object. //! //! The purpose of this function is to determine if the value is either a reference count or an encoded pointer to the //! control block. If it is a reference count, an @ref omni::core::IWeakObjectControlBlock is allocated and the value //! is updated to point to this new block. If the value is already an encoded pointer to a control block, the control //! block's weak reference count is incremented (i.e. @ref omni::core::IObject::acquire() is called on the *control //! block*). //! //! In both cases, a pointer to the control block is returned. It is up to the caller to ensure @ref //! 
omni::core::IObject::release() is called on the returned pointer once the object is no longer in use. //! //! See @ref omni::core::IWeakObjectControlBlock for motivation as to why this function is needed. //! //! See @ref omni::core::WeakPtr and @ref omni::core::ImplementsWeak to understand how to use weak pointers and how to //! enable weak pointer support in your objects. //! //! @thread_safety All operations on the given value are thread safe. inline omni::core::IWeakObjectControlBlock* getOrCreateWeakObjectControlBlock(omni::core::IObject* obj, uintptr_t* refCountOrEncodedPtr) { auto impl = detail::loadWeakObjectGetOrCreateControlBlock(); OMNI_ASSERT(impl); return impl(obj, refCountOrEncodedPtr); } CARB_DETAIL_DEFINE_DEFERRED_LOAD(loadWeakObjectControlBlockOp, omniWeakObjectControlBlockOp, (uint32_t(*)(uintptr_t*, WeakObjectControlBlockOp))); //! Increments the strong count of an object that is implemented with @ref omni::core::ImplementsWeak. //! //! This method is an implementation detail and should not directly be called by users. //! //! The provided parameter, @p refCountOrPtr, is a pointer that points to a value. That value can represent either: //! //! - An object's reference count (i.e. strong count). //! //! - An **encoded** pointer to an @ref omni::core::IWeakObjectControlBlock object. //! //! This method determines which of the above cases is true and atomically increments the strong count. //! //! See @ref omni::core::IWeakObjectControlBlock for motivation as to why this function is needed. //! //! See @ref omni::core::WeakPtr and @ref omni::core::ImplementsWeak to understand how to use weak pointers and how to //! enable weak pointer support in your objects. //! //! @thread_safety All operations on the given value are thread safe. 
inline uint32_t incrementWeakObjectStrongCount(uintptr_t* refCountOrEncodedPtr) { auto impl = detail::loadWeakObjectControlBlockOp(); OMNI_ASSERT(impl); return impl(refCountOrEncodedPtr, WeakObjectControlBlockOp::eIncrementStrong); } //! Decrements the strong count of an object that is implemented with @ref omni::core::ImplementsWeak. //! //! This method is an implementation detail and should not directly be called by users. //! //! The provided parameter, @p refCountOrPtr, is a pointer that points to a value. That value can represent either: //! //! - An object's reference count (i.e. strong count). //! //! - An **encoded** pointer to an @ref omni::core::IWeakObjectControlBlock object. //! //! This method determines which of the above cases is true and atomically decrements the strong count. //! //! See @ref omni::core::IWeakObjectControlBlock for motivation as to why this function is needed. //! //! See @ref omni::core::WeakPtr and @ref omni::core::ImplementsWeak to understand how to use weak pointers and how to //! enable weak pointer support in your objects. //! //! @thread_safety All operations on the given value are thread safe. inline uint32_t decrementWeakObjectStrongCount(uintptr_t* refCountOrEncodedPtr) { auto impl = detail::loadWeakObjectControlBlockOp(); OMNI_ASSERT(impl); return impl(refCountOrEncodedPtr, WeakObjectControlBlockOp::eDecrementStrong); } //! Decrements the weak count of an object that is implemented with @ref omni::core::ImplementsWeak. //! //! This method is an implementation detail and should not directly be called by users. //! //! The provided parameter, @p refCountOrPtr, is a pointer that points to a value. That value can represent either: //! //! - An object's reference count (i.e. strong count). //! //! - An **encoded** pointer to an @ref omni::core::IWeakObjectControlBlock object. //! //! This method determines which of the above cases is true and atomically decrements the weak count. //! //! 
See @ref omni::core::IWeakObjectControlBlock for motivation as to why this function is needed. //! //! See @ref omni::core::WeakPtr and @ref omni::core::ImplementsWeak to understand how to use weak pointers and how to //! enable weak pointer support in your objects. //! //! @thread_safety All operations on the given value are thread safe. inline void decrementWeakObjectWeakCount(uintptr_t* refCountOrEncodedPtr) { auto impl = detail::loadWeakObjectControlBlockOp(); OMNI_ASSERT(impl); (void)impl(refCountOrEncodedPtr, WeakObjectControlBlockOp::eDecrementWeak); } //! Returns the strong count of an object that is implemented with @ref omni::core::ImplementsWeak. //! //! This method is an implementation detail and should not directly be called by users. //! //! The provided parameter, @p refCountOrPtr, is a pointer that points to a value. That value can represent either: //! //! - An object's reference count (i.e. strong count). //! //! - An **encoded** pointer to an @ref omni::core::IWeakObjectControlBlock object. //! //! This method determines which of the above cases is true and returns the strong count. //! //! See @ref omni::core::IWeakObjectControlBlock for motivation as to why this function is needed. //! //! See @ref omni::core::WeakPtr and @ref omni::core::ImplementsWeak to understand how to use weak pointers and how to //! enable weak pointer support in your objects. //! //! @thread_safety All operations on the given value are thread safe. inline uint32_t getWeakObjectStrongCount(uintptr_t* refCountOrEncodedPtr) { auto impl = detail::loadWeakObjectControlBlockOp(); OMNI_ASSERT(impl); return impl(refCountOrEncodedPtr, WeakObjectControlBlockOp::eGetStrongCount); } //! Returns the weak count of an object that is implemented with @ref omni::core::ImplementsWeak. //! //! This method is an implementation detail and should not directly be called by users. //! //! The provided parameter, @p refCountOrPtr, is a pointer that points to a value. 
That value can represent either: //! //! - An object's reference count (i.e. strong count). //! //! - An **encoded** pointer to an @ref omni::core::IWeakObjectControlBlock object. //! //! This method determines which of the above cases is true and returns the weak count. //! //! See @ref omni::core::IWeakObjectControlBlock for motivation as to why this function is needed. //! //! See @ref omni::core::WeakPtr and @ref omni::core::ImplementsWeak to understand how to use weak pointers and how to //! enable weak pointer support in your objects. //! //! @thread_safety All operations on the given value are thread safe. inline uint32_t getWeakObjectWeakCount(uintptr_t* refCountOrEncodedPtr) { auto impl = detail::loadWeakObjectControlBlockOp(); OMNI_ASSERT(impl); return impl(refCountOrEncodedPtr, WeakObjectControlBlockOp::eGetWeakCount); } //! Returns the `true` on object that is implemented with @ref omni::core::ImplementsWeak has had a weak pointer //! attached to it. //! //! This method is an implementation detail and should not directly be called by users. //! //! The provided parameter, @p refCountOrPtr, is a pointer that points to a value. That value can represent either: //! //! - An object's reference count (i.e. strong count). //! //! - An **encoded** pointer to an @ref omni::core::IWeakObjectControlBlock object. //! //! This method determines which of the above cases is true and return true if the value points to a control block. //! //! See @ref omni::core::IWeakObjectControlBlock for motivation as to why this function is needed. //! //! See @ref omni::core::WeakPtr and @ref omni::core::ImplementsWeak to understand how to use weak pointers and how to //! enable weak pointer support in your objects. //! //! @thread_safety All operations on the given value are thread safe. 
inline bool hasWeakObjectControlBlock(uintptr_t* refCountOrEncodedPtr)
{
    auto impl = detail::loadWeakObjectControlBlockOp();
    OMNI_ASSERT(impl);
    return (0 != impl(refCountOrEncodedPtr, WeakObjectControlBlockOp::eHasControlBlock));
}

} // namespace detail
#endif // DOXYGEN_BUILD

//! Helper template for implementing one or more interfaces that support weak pointers.
//!
//! This class has similar functionality as @ref Implements but adds support for @ref IWeakObject.
//!
//! As an example, consider the following interface:
//!
#ifdef CARB_DOC_BUILD
//! @snippet "source/tests/test.unit/omni.core/TestWeakPtr.cpp" carb-docs-weakptr-example-interface
#endif
//!
//! Note that the interface inherits from @ref IWeakObject rather than @ref IObject.
//!
//! To implement the interface above, @ref ImplementsWeak (i.e. this class) can be used as follows:
//!
#ifdef CARB_DOC_BUILD
//! @snippet "source/tests/test.unit/omni.core/TestWeakPtr.cpp" carb-docs-weakptr-example-class
#endif
template <typename T, typename... Rest>
struct ImplementsWeak : public ImplementsCast<T, Rest...>
{
public:
    //! @copydoc omni::core::IObject::acquire.
    inline void acquire() noexcept
    {
        // note: this implementation is needed to disambiguate which `acquire` to call when using multiple
        // inheritance. it has zero-overhead.
        static_cast<T*>(this)->acquire();
    }

    //! @copydoc omni::core::IObject::release.
    inline void release() noexcept
    {
        // note: this implementation is needed to disambiguate which `release` to call when using multiple
        // inheritance. it has zero-overhead.
        static_cast<T*>(this)->release();
    }

    //! @copydoc omni::core::IWeakObject::getWeakObjectControlBlock_abi.
    inline ObjectPtr<IWeakObjectControlBlock> getWeakObjectControlBlock() noexcept
    {
        // note: this implementation is needed to disambiguate which `getWeakObjectControlBlock` to call when using
        // multiple inheritance. it has zero-overhead.
        return static_cast<T*>(this)->getWeakObjectControlBlock();
    }

protected:
    //! Destructor
    virtual ~ImplementsWeak() noexcept
    {
        // decrementWeakObjectWeakCount() will no-op if a control block has not been created
        omni::core::detail::decrementWeakObjectWeakCount(&m_refCountOrPtr);
    }

    //! @copydoc omni::core::IObject::acquire.
    virtual void acquire_abi() noexcept override
    {
        omni::core::detail::incrementWeakObjectStrongCount(&m_refCountOrPtr);
    }

    //! @copydoc omni::core::IObject::release.
    //!
    //! Destroys the object (`delete this`) when the strong count reaches zero; the control block (if any) keeps
    //! outstanding weak pointers safe after that point.
    virtual void release_abi() noexcept override
    {
        if (0 == omni::core::detail::decrementWeakObjectStrongCount(&m_refCountOrPtr))
        {
            delete this;
        }
    }

    //! @copydoc omni::core::IWeakObject::getWeakObjectControlBlock
    virtual IWeakObjectControlBlock* getWeakObjectControlBlock_abi() noexcept override
    {
        return omni::core::detail::getOrCreateWeakObjectControlBlock(static_cast<T*>(this), &m_refCountOrPtr);
    }

#ifndef DOXYGEN_BUILD
    //! Return the strong reference count. Should only be used for testing and debugging.
    uint32_t _getStrongCount() noexcept
    {
        return omni::core::detail::getWeakObjectStrongCount(&m_refCountOrPtr);
    }

    //! Return `true` if a weak object control block has been created for this object. Should only be used for testing
    //! and debugging.
    bool _hasWeakObjectControlBlock() noexcept
    {
        return omni::core::detail::hasWeakObjectControlBlock(&m_refCountOrPtr);
    }
#endif

private:
    // by default, this value stores the reference count of the object.
    //
    // however, when getWeakObjectControlBlock_abi() is called, this memory is repurposed to store a pointer to an
    // IWeakObjectControlBlock. it's the IWeakObjectControlBlock that will store both a strong and weak reference count
    // for this object.
    //
    // the pointer to the IWeakObjectControlBlock count is "encoded" so that we can easily determine if this memory is a
    // reference count or a pointer to an IWeakObjectControlBlock.
    //
    // the encoding of this pointer is an implementation detail and not exposed to the user.
    //
    // this value should be treated as opaque.
    uintptr_t m_refCountOrPtr{ 1 }; // starts as a plain strong refcount of 1 (no control block yet)
};

} // namespace core
} // namespace omni

#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/core/IWeakObject.gen.h>
omniverse-code/kit/include/omni/core/VariadicMacroUtils.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// this macro can be used to count the number of arguments, returning a user defined value for each count.
//
// some examples as to what you can do with this:
//
// - return the argument count
//
// - return 1 if the count is even, and 0 if the count is odd.
//
// - return "one", "two", "three", etc. based on the argument count
//
// note, if the argument list is empty, this macro counts the argument list as 1. in short, this macro cannot detect an
// empty list.
//
// to call the macro, pass the argument list along with a reversed list of a mapping from the count to the desired
// value. for example, to return the argument count:
//
//   #define COUNT(...)
//       OMNI_VA_GET_ARG_64(__VA_ARGS__, 63, 62, 61, 60,
//                          59, 58, 57, 56, 55, 54, 53, 52, 51, 50,
//                          49, 48, 47, 46, 45, 44, 43, 42, 41, 40,
//                          39, 38, 37, 36, 35, 34, 33, 32, 31, 30,
//                          29, 28, 27, 26, 25, 24, 23, 22, 21, 20,
//                          19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
//                          9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
//
// the mechanism: the caller's k arguments shift the reversed list right by k positions, so the
// 64th argument seen by OMNI_VA_GET_ARG_64 (bound to N) is the list entry that maps count k to
// the desired value.
#define OMNI_VA_GET_ARG_64(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, \
                           _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38,  \
                           _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56,  \
                           _57, _58, _59, _60, _61, _62, _63, N, ...)                                                 \
    N

// needed in by MSVC's preprocessor to evaluate __VA_ARGS__. harmless on other compilers.
#define OMNI_VA_EXPAND(x_) x_

// returns 1 if the argument list has fewer than two arguments (i.e. 1 or empty). otherwise returns 0.
//
// the list below is 64 entries: 62 zeros followed by two ones; counts of 1 (or an empty list,
// which the preprocessor cannot distinguish from 1) select a trailing 1, anything larger selects a 0.
#define OMNI_VA_IS_FEWER_THAN_TWO(...)                                                                                \
    OMNI_VA_EXPAND(OMNI_VA_GET_ARG_64(__VA_ARGS__,                                                                    \
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,                                                   \
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,                                                   \
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,                                                   \
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,                                                   \
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,                                                   \
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,                                                   \
                                      0, 0, 1 /* one or 0 */, 1))

// counts the number of given arguments. due to the design of the pre-processor, if 0 arguments are given, a count of 1
// is incorrectly returned.
//
// the reversed list must run 63 down to 0 with no gaps; each entry is the count returned when that
// many arguments are supplied.
#define OMNI_VA_COUNT(...)                                                                                            \
    OMNI_VA_EXPAND(OMNI_VA_GET_ARG_64(__VA_ARGS__,                                                                    \
                                      63, 62, 61, 60, 59, 58, 57, 56, 55, 54,                                         \
                                      53, 52, 51, 50, 49, 48, 47, 46, 45, 44,                                         \
                                      43, 42, 41, 40, 39, 38, 37, 36, 35, 34,                                         \
                                      33, 32, 31, 30, 29, 28, 27, 26, 25, 24,                                         \
                                      23, 22, 21, 20, 19, 18, 17, 16, 15, 14,                                         \
                                      13, 12, 11, 10, 9, 8, 7, 6, 5, 4,                                               \
                                      3, 2, 1, 0))

// returns the first argument. if the argument list is empty, nothing is returned.
//
// ("a", "b", "c") -> "a"
#define OMNI_VA_FIRST(...) OMNI_VA_FIRST_(OMNI_VA_IS_FEWER_THAN_TWO(__VA_ARGS__), __VA_ARGS__)
#define OMNI_VA_FIRST_(is_fewer_than_two_, ...) OMNI_VA_FIRST__(is_fewer_than_two_, __VA_ARGS__)
#define OMNI_VA_FIRST__(is_fewer_than_two_, ...) OMNI_VA_FIRST__##is_fewer_than_two_(__VA_ARGS__)
#define OMNI_VA_FIRST__0(...) OMNI_VA_EXPAND(OMNI_VA_FIRST___(__VA_ARGS__))
#define OMNI_VA_FIRST__1(...) __VA_ARGS__
#define OMNI_VA_FIRST___(first, ...) first

// removes the first argument from the argument list, returning the remaining arguments.
//
// if any arguments are returned a comma is prepended to the list.
//
// if no arguments are returned, no comma is added.
//
// ("a", "b", "c") -> , "b", "c"
// () ->
#define OMNI_VA_COMMA_WITHOUT_FIRST(...)                                                                              \
    OMNI_VA_COMMA_WITHOUT_FIRST_(OMNI_VA_IS_FEWER_THAN_TWO(__VA_ARGS__), __VA_ARGS__)
#define OMNI_VA_COMMA_WITHOUT_FIRST_(is_fewer_than_two_, ...)                                                         \
    OMNI_VA_COMMA_WITHOUT_FIRST__(is_fewer_than_two_, __VA_ARGS__)
#define OMNI_VA_COMMA_WITHOUT_FIRST__(is_fewer_than_two_, ...)                                                        \
    OMNI_VA_COMMA_WITHOUT_FIRST__##is_fewer_than_two_(__VA_ARGS__)
#define OMNI_VA_COMMA_WITHOUT_FIRST__0(...) OMNI_VA_EXPAND(OMNI_VA_COMMA_WITHOUT_FIRST___(__VA_ARGS__))
#define OMNI_VA_COMMA_WITHOUT_FIRST__1(...)
#define OMNI_VA_COMMA_WITHOUT_FIRST___(first, ...) , __VA_ARGS__

// returns the first argument from the argument list. if the given list is empty, an empty string is returned.
//
// ("a", "b", "c") -> "a"
// () -> ""
#define OMNI_VA_FIRST_OR_EMPTY_STRING(...)                                                                            \
    OMNI_VA_FIRST_OR_EMPTY_STRING_(OMNI_VA_IS_FEWER_THAN_TWO(__VA_ARGS__), __VA_ARGS__)
#define OMNI_VA_FIRST_OR_EMPTY_STRING_(is_fewer_than_two_, ...)                                                       \
    OMNI_VA_FIRST_OR_EMPTY_STRING__(is_fewer_than_two_, __VA_ARGS__)
#define OMNI_VA_FIRST_OR_EMPTY_STRING__(is_fewer_than_two_, ...)                                                      \
    OMNI_VA_FIRST_OR_EMPTY_STRING__##is_fewer_than_two_(__VA_ARGS__)
#define OMNI_VA_FIRST_OR_EMPTY_STRING__0(...) OMNI_VA_EXPAND(OMNI_VA_FIRST_OR_EMPTY_STRING___(__VA_ARGS__))
// must expand to a zero-length string literal ("") for an empty list -- not " " -- so callers can
// rely on the documented `() -> ""` contract. with exactly one argument, `"" arg` relies on
// string-literal concatenation, so the single argument must itself be a string literal.
#define OMNI_VA_FIRST_OR_EMPTY_STRING__1(...) "" __VA_ARGS__
#define OMNI_VA_FIRST_OR_EMPTY_STRING___(first, ...) first
omniverse-code/kit/include/omni/core/OmniAttr.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Helpers related to define interfaces and interface attributes.
#pragma once

#include <type_traits>

#ifdef OMNI_BIND
// when parsed by the omni.bind tool itself, OMNI_ATTR() becomes a clang annotation, e.g.:
// OMNI_ATTR("enum,prefix=eState") --> __attribute__((annotate("omni_attr:enum,prefix=eState")))
#    define OMNI_ATTR(attrs_) __attribute__((annotate("omni_attr:" attrs_)))
#else
//! Provides additional contextual information to the 'omni.bind' code generation tool.  This information is used to
//! generated efficient bindings to the interface.
//!
//! For example, if an ABI method accepts a const pointer than cannot be null, the method will be bound to an API layer
//! method that accepts a C++ reference.
//!
//! See @rstdoc{../../../../docs/omni.bind/omni.bind} for an overview of `OMNI_ATTR` and the <i>omni.bind</i> tool.
//!
//! In a normal (non-omni.bind) compile the attribute text is discarded entirely.
#    define OMNI_ATTR(p0_)
#endif

//! Macro to access generated API of an interface.
#define OMNI_GENERATED_API(iface_) omni::core::Generated<iface_##_abi>

//! Used in cases when defined interface provides an overload for a function from generated API.
//!
//! In those situations generated API becomes unavailable and member function from generated API
//! has to be introduced explicitly:
//!
//! @code{.cpp}
//!     OMNI_DEFINE_INTERFACE_API(omni::windowing::IWindow)
//!     {
//!     public:
//!         OMNI_USE_FROM_GENERATED_API(omni::windowing::IWindow, overloadedFunction)
//!
//!         void overloadedFunction(int overloadedArg)
//!         {
//!         }
//!     };
//! @endcode
//!
#define OMNI_USE_FROM_GENERATED_API(iface_, func_) using OMNI_GENERATED_API(iface_)::func_;

//! Used to forward declare an interface.
//!
//! The given class name must not include namespaces.  Rather, this macro must be invoked within the proper namespace.
//!
//! When _defining_ an interface in a header file, either @ref OMNI_DECLARE_INTERFACE() or @ref
//! OMNI_DEFINE_INTERFACE_API() should be invoked.
#define OMNI_DECLARE_INTERFACE(iface_)                                                                                 \
    class iface_##_abi;                                                                                                \
    /**                                                                                                                \
     Typedef for API wrapper of iface_##_abi                                                                           \
     @see omni::core::Api, iface_##_abi                                                                                \
     */                                                                                                                \
    using iface_ = omni::core::Api<iface_##_abi>;

//! Used to extend the <i>omni.bind</i> generated API layer.
//!
//! This macro handles the `class` line of a C++ class (it expands to an explicit specialization of
//! omni::core::Api for the interface's ABI type).  Use this macro in the following manner:
//!
//! @code{.cpp}
//!     OMNI_DEFINE_INTERFACE_API(omni::windowing::IWindow)
//!     {
//!     public:
//!         inline ObjectPtr<input::IKeyboardOnEventConsumer> getConsumer() noexcept
//!         { /* ... */ }
//!     };
//! @endcode
#define OMNI_DEFINE_INTERFACE_API(iface_)                                                                              \
    /**                                                                                                                \
     * Implements the generated API layer for iface_                                                                   \
     */                                                                                                                \
    template <>                                                                                                        \
    class omni::core::Api<iface_##_abi> : public OMNI_GENERATED_API(iface_)

//! @def OMNI_BIND_INCLUDE_INTERFACE_DECL
//!
//! By defining this macro before including a header generated by *omni.bind*, only the declaration of any generated
//! boiler-plate code is included.
//!
//! @see OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! @def OMNI_BIND_INCLUDE_INTERFACE_IMPL
//!
//! By defining this macro before including a header generated by *omni.bind*, only the implementations of any
//! generated boiler-plate code is included.
//!
//! @see OMNI_BIND_INCLUDE_INTERFACE_DECL

namespace carb
{
template <typename T>
class ObjectPtr; // forward declaration needed by ObjectParam
}

namespace omni
{
namespace core
{

//! Templated class to store generated code from the <i>omni.bind</i> code generator.
//!
//! See @ref omni::core::Api for how this class is used.  omni.bind emits an explicit
//! specialization of this template per interface; this unspecialized fallback simply inherits
//! from the ABI type.
template <typename T>
class Generated : public T
{
};

//! The API layer of an Omniverse interface.
//!
//! This template is the main construct used by users to interact with Omniverse interfaces.
//!
//! This template inherits from the @ref omni::core::Generated template which in turn inherits from another @ref
//! omni::core::Api template instantiation.  Eventually, we'll find that the root base class is @ref IObject_abi.  For
//! example, @ref omni::log::ILog has the following inheritance chain:
//!
//!     Api<omni::log::ILog_abi>             // interface author bindings for ILog
//!     Generated<omni::log::ILog_abi>       // omni.bind (generated) bindings for ILog
//!     omni::log::ILog_abi                  // raw ABI (defined by author)
//!     Api<omni::core::IObject_abi>         // hand-written bindings to IObject
//!     Generated<omni::core::IObject_abi>   // omni.bind (generated) bindings for IObject
//!     omni::core::IObject_abi              // raw ABI
//!
//! where:
//!
//! @code{.cpp}
//!     namespace omni::log
//!     {
//!         using ILog = Api<omni::log::ILog_abi>;
//!     }
//! @endcode
//!
//! Each point in the inheritance chain serves a different component in the system:
//!
//! - The `_abi` layer defines the raw ABI.  The `_abi` object's methods define how the interface can be used across DLL
//!   boundaries.  Defining a stable ABI is the primary goal of Omniverse interfaces.  In practice, methods in the ABI
//!   have many restrictions.  See @oni_overview for details.
//!
//! - The @ref Generated template gives the <i>omni.bind</i> code generation tool a place to create a template
//!   specialization that defines boiler-plate methods that make using the ABI easier.  Again, see @oni_overview for
//!   details.
//!
//! - The @ref Api template gives the interface author a place to create wrappers (via specialization) to make using the
//!   interface easier.  Since the @ref Api layer inherits from the @ref Generated layer, specializing the @ref Api
//!   layer allows interface authors to augment the boiler-plate code generated by <i>omni.bind</i>.  Interfaces authors
//!   should use the @ref OMNI_DEFINE_INTERFACE_API() macro to specialize the @ref Api layer.
//!
//! Expected usage is:
//!
//! @code{.cpp}
//!     // forward declare the interface. this sets up the Api<> typedef.
//!     OMNI_DECLARE_INTERFACE(foo::IMyInterface);
//!
//!     namespace foo {
//!     class IMyInterface_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("foo.IMyInterface")>
//!     {
//!     protected:
//!         virtual void myMethod_abi(uint32_t x) noexcept = 0;
//!         // ...
//!     };
//!     } // namespace foo
//!
//!     // include code generated by the omni.bind tool.
//!     // this include should be outside of any namespace {} blocks
//!     #include "IMyInterface.gen.h"
//!
//!     // use OMNI_DEFINE_INTERFACE_API() to add hand-written API wrappers
//!     // this macro should be invoked outside of any namespace {} blocks
//!     OMNI_DEFINE_INTERFACE_API(foo::IMyInterface_abi)
//!     {
//!         void myReallyCoolMethod() { /* inline code here */ }
//!         // ...
//!     };
//! @endcode
template <typename T>
class Api : public Generated<T>
{
};

template <typename T>
class ObjectPtr; // forward declaration needed by ObjectParam

//! Helper object used by <i>omni.bind</i> to ease, at zero cost, the acceptance of raw and smart pointers to methods
//! that wish to accept a raw pointer.
//!
//! Stores only a non-owning raw pointer; no reference count is taken or released.
//!
//! This object should never be used outside of <i>omni.bind</i>.
template <typename T>
class ObjectParam
{
public:
    //! Accept a smart pointer of different type.
    //! (enable_if restricts this overload to pointee types derived from T.)
    template <typename Y>
    ObjectParam(const ObjectPtr<Y>& o, typename std::enable_if_t<std::is_base_of<T, Y>::value>* = nullptr) noexcept
        : m_ptr{ o.get() }
    {
    }

    //! Accept a Carbonite smart pointer of different type.
    template <typename Y>
    ObjectParam(const carb::ObjectPtr<Y>& o) noexcept : m_ptr{ o.get() }
    {
    }

    //! Accept a raw pointer.
    ObjectParam(T* o) noexcept : m_ptr{ o }
    {
    }

    //! Arrow operator.
    T* operator->() const noexcept
    {
        return m_ptr;
    }

    //! Access raw pointer.
    T* get() const noexcept
    {
        return m_ptr;
    }

    //! Returns true if the wrapped pointer is not `nullptr`.
    explicit operator bool() const noexcept
    {
        return m_ptr != nullptr;
    }

private:
    T* m_ptr; // non-owning; lifetime is the caller's responsibility
};

} // namespace core
} // namespace omni
omniverse-code/kit/include/omni/core/ModuleExports.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Helpers for defining a plugin's @ref omni::core::ModuleExports table.
#pragma once

#include "IObject.h"
#include "../../carb/Interface.h"

#include <algorithm>
#include <cstring>
#include <type_traits>

namespace omni
{
namespace log
{
class ILog;
}

namespace structuredlog
{
class IStructuredLog;

//! Registration function to install a schema with the structured logging system.
//!
//! @param[in] log  A pointer to the global singleton structured logging system to install
//!                 the schema in.
//! @returns `true` if the schema is successfully installed or was already installed.
//! @returns `false` if the schema could not be installed.  This may be caused by a lack of
//!          available memory, or too many events have been registered in the system.
using SchemaAddFn = bool (*)(IStructuredLog* log);
} // namespace structuredlog

namespace core
{

// The k* constants below are the string keys used to tag each entry in the ModuleExports table;
// the host looks entries up by these strings, so each must be unique and stable across versions.

//! Unique type name for @ref omni::core::ModuleExportEntryOnModuleLoad.
constexpr const char* const kModuleExportEntryTypeOnModuleLoad = "omniOnModuleLoad";

//! Unique type name for @ref omni::core::ModuleExportEntryOnModuleStarted.
constexpr const char* const kModuleExportEntryTypeOnModuleStarted = "omniOnModuleStarted";

//! Unique type name for @ref omni::core::ModuleExportEntryOnModuleCanUnload.
constexpr const char* const kModuleExportEntryTypeOnModuleCanUnload = "omniOnModuleCanUnload";

//! Unique type name for @ref omni::core::ModuleExportEntryOnModuleUnload.
constexpr const char* const kModuleExportEntryTypeOnModuleUnload = "omniOnModuleUnload";

//! Unique type name for @ref omni::core::ModuleExportEntryITypeFactory.
constexpr const char* const kModuleExportEntryTypeITypeFactory = "omniITypeFactory";

//! Unique type name for @ref omni::core::ModuleExportEntryILog.
constexpr const char* const kModuleExportEntryTypeILog = "omniILog";

//! Unique type name for @ref omni::core::ModuleExportEntryLogChannel.
constexpr const char* const kModuleExportEntryTypeLogChannel = "omniLogChannel";

//! Unique type name for @ref omni::core::ModuleExportEntryIStructuredLog.
constexpr const char* const kModuleExportEntryTypeIStructuredLog = "omniIStructuredLog";

//! Unique type name for @ref omni::core::ModuleExportEntrySchema.
constexpr const char* const kModuleExportEntryTypeSchema = "omniSchema";

//! Unique type name for @ref omni::core::ModuleExportEntryCarbClientName.
constexpr const char* const kModuleExportEntryTypeCarbClientName = "carbClientName";

//! Unique type name for @ref omni::core::ModuleExportEntryCarbFramework.
constexpr const char* const kModuleExportEntryTypeCarbFramework = "carbFramework";

//! Unique type name for @ref omni::core::ModuleExportEntryCarbIAssert.
constexpr const char* const kModuleExportEntryTypeCarbIAssert = "carbIAssert";

//! Unique type name for @ref omni::core::ModuleExportEntryCarbILogging.
constexpr const char* const kModuleExportEntryTypeCarbILogging = "carbILogging";

//! Unique type name for @ref omni::core::ModuleExportEntryCarbIProfiler.
constexpr const char* const kModuleExportEntryTypeCarbIProfiler = "carbIProfiler";

//! Unique type name for @ref omni::core::ModuleExportEntryCarbIL10n.
constexpr const char* const kModuleExportEntryTypeCarbIL10n = "carbIL10n";

//! Unique type name for @ref omni::core::ModuleExportEntryGetModuleDependencies.
// NOTE(review): "Dependecies" below looks like a misspelling, but this string is an interop key
// matched by the host -- confirm the host uses the same spelling before "fixing" it.
constexpr const char* const kModuleExportEntryTypeGetModuleDependencies = "omniGetModuleDependecies";

//! Per @ref omni::core::ModuleExportEntry flags.
using ModuleExportEntryFlag = uint32_t;
constexpr ModuleExportEntryFlag fModuleExportEntryFlagNone = 0; //!< No flags.

//! Fail module load if entry could not be populated.
constexpr ModuleExportEntryFlag fModuleExportEntryFlagRequired = (1 << 0);

//! Helper macro for defining an entry (i.e. @ref omni::core::ModuleExportEntry) in the export table (i.e. @ref
//! omni::core::ModuleExports).
//!
//! Implementation detail.  Not intended for use outside of omni/core/ModuleExports.h.
#define OMNI_MODULE_EXPORT_ENTRY_BEGIN(name_)                                                                          \
    struct name_                                                                                                       \
    {                                                                                                                  \
        /** <b>Unique</b> type name describing the entry. */                                                           \
        const char* type;                                                                                              \
                                                                                                                       \
        /** Special flags for the entry (ex: required). */                                                             \
        ModuleExportEntryFlag flags;                                                                                   \
                                                                                                                       \
        /** Size of the entry in bytes (including the header). */                                                      \
        uint32_t byteCount;                                                                                            \
                                                                                                                       \
        /** Constructor */                                                                                             \
        name_(const char* t, ModuleExportEntryFlag f)                                                                  \
        {                                                                                                              \
            type = t;                                                                                                  \
            flags = f;                                                                                                 \
            byteCount = sizeof(*this);                                                                                 \
        };

//! Helper macro for defining an entry in the export table.
//!
//! Implementation detail.  Not intended for use outside of omni/core/ModuleExports.h.
#define OMNI_MODULE_EXPORT_ENTRY_END(name_)                                                                            \
    }                                                                                                                  \
    ;                                                                                                                  \
    CARB_ASSERT_INTEROP_SAFE(name_);

//! Define an entry in @ref omni::core::ModuleExports.
//!
//! Use @ref OMNI_MODULE_EXPORT_ENTRY_BEGIN and @ref OMNI_MODULE_EXPORT_ENTRY_END to define an new entry type.
//!
//! Each entry type must have a unique @p type (which is a `string`).
//!
//! @ref OMNI_MODULE_EXPORT_ENTRY_BEGIN defines the header of the entry.  Note, the use of macros vs. inheritance is to
//! ensure the resulting entry is @rstref{ABI-safe <abi-compatibility>} (e.g. passes @ref CARB_ASSERT_INTEROP_SAFE)
//! since these entries will be passed across DLL boundaries.
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntry)
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntry)

// header layout: const char* (pointer) + two uint32_t fields (8 bytes) on every supported platform.
static_assert(sizeof(ModuleExportEntry) == (8 + sizeof(void*)), "unexpected ModuleExportEntry size");

struct InterfaceImplementation;

//!
//! Called to load interface implementation registration information.
//!
//! This function is called after @ref omni::core::ModuleGetExportsFn.
//!
//! This function will never be called concurrently with any other function in the module.
//!
//! The module author can assume that the module's static initialization has occurred by the time this function is
//! called.
//!
//! The author should perform any implementation initialization in this function and return @ref kResultSuccess.  If
//! initialization fails, an error message should be logged (via @ref OMNI_LOG_ERROR) and an appropriate error code
//! should be returned.
//!
//! Due to potential race conditions, the module will not have access to the @ref omni::core::ITypeFactory during this
//! call but will have access to the logging and profiling systems.  If @ref omni::core::ITypeFactory access is needed
//! during initialization, lazy initialization is suggested (i.e. perform initialization during the first call to
//! `createFn`).
//!
//! The memory pointed to by @p *out must remain valid until the next call to this function.
using OnModuleLoadFn = Result(const InterfaceImplementation** out, uint32_t* outCount);

//! @ref omni::core::ModuleExports entry to register a function to advertise the interface implementations available in
//! the plugin.
//!
//! Use the helper @ref OMNI_MODULE_ON_MODULE_LOAD to add this entry.
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryOnModuleLoad)
OnModuleLoadFn* onModuleLoad; //!< Module's load function (advertises the available implementations).
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryOnModuleLoad)

//! Registers the plugin's function who is responsible for advertising the available interface implementations in the
//! plugin.
//!
//! @param exp_ The @ref omni::core::ModuleExports table in which the entry should be added.
//!
//! @param fn_ The plugin's @ref omni::core::OnModuleLoadFn who is responsible for advertising the plugin's interface
//! implementations.
#define OMNI_MODULE_ON_MODULE_LOAD(exp_, fn_) OMNI_RETURN_IF_FAILED(exp_->addOnModuleLoad(fn_))

//! This function will be called after the module is fully registered.  It is called after @ref
//! omni::core::OnModuleLoadFn successfully returns.
//!
//! This function will not be called again until after @ref OnModuleUnloadFn has completed and the module has been fully
//! unloaded and reloaded.
//!
//! The owning @ref omni::core::ITypeFactory can be safely accessed in this function.
//!
//! A interface implementation's `createFn` can be called concurrently with this function.
//!
//! An interface implementation's `createFn` can be called before this function is called, as such:
//!
//! - Move critical module initialization to @ref omni::core::OnModuleLoadFn.
//!
//! - If some initialization cannot be performed in @ref omni::core::OnModuleLoadFn (due to @ref
//!   omni::core::ITypeFactory not being accessible), perform lazy initialization in `createFn` (in a thread-safe
//!   manner).
using OnModuleStartedFn = void();

//! @ref omni::core::ModuleExports entry to register a function to be called after the plugin has loaded.
//!
//! Use the helper @ref OMNI_MODULE_ON_MODULE_STARTED to add this entry.
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryOnModuleStarted)
OnModuleStartedFn* onModuleStarted; //!< Module function to call once the module is loaded.
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryOnModuleStarted)

//! Registers the plugin's function that will be called once the plugin is loaded.  See
//! @ref omni::core::OnModuleStartedFn for threading consideration with this function.
//!
//! @param exp_ The @ref omni::core::ModuleExports table in which the entry should be added.
//!
//! @param fn_ The plugin's @ref omni::core::OnModuleStartedFn to be called after the plugin is loaded.
#define OMNI_MODULE_ON_MODULE_STARTED(exp_, fn_) OMNI_RETURN_IF_FAILED(exp_->addOnModuleStarted(fn_))

//! Called to determine if the module can be unloaded.
//!
//! Return `true` if it is safe to unload the module.  It is up to the module to determine what "safe" means, though in
//! general, it is expected that "safe" means that none of the objects created from the module's `createFn`'s are still
//! alive.
//!
//! This function will never be called while another thread is calling one of this module's `createFn` functions, during
//! @ref omni::core::OnModuleLoadFn, or during @ref omni::core::OnModuleStartedFn.
//!
//! @ref omni::core::OnModuleCanUnloadFn <u>must not</u> access the owning @ref omni::core::ITypeFactory.  @ref
//! omni::core::ITypeFactory is unable to prevent this, thus, if @ref omni::core::OnModuleCanUnloadFn does access @ref
//! omni::core::ITypeFactory (either directly or indirectly) there are no safety guards in place and undefined behavior
//! will result.
//!
//! If the module returns `true` from this function, @ref omni::core::OnModuleUnloadFn will be called.  If `false` is
//! returned, @ref omni::core::OnModuleUnloadFn will not be called.
//!
//! If this function returns `false`, it may be called again.  If `true` is returned, the module will be unloaded and
//! this function will not be called until the module is loaded again (if ever).
using OnModuleCanUnloadFn = bool();

//! @ref omni::core::ModuleExports entry to register a function to determine if the module can be unloaded.
//!
//! Use the helper @ref OMNI_MODULE_ON_MODULE_CAN_UNLOAD to add this entry.
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryOnModuleCanUnload)
OnModuleCanUnloadFn* onModuleCanUnload; //!< Module function to call to see if the module can be unloaded.
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryOnModuleCanUnload)

//! Registers the plugin's function that determines if the plugin can be unloaded.  See @ref
//! omni::core::OnModuleCanUnloadFn for details.
//!
//! @param exp_ The @ref omni::core::ModuleExports table in which the entry should be added.
//!
//! @param fn_ The plugin's @ref omni::core::OnModuleCanUnloadFn to be called after the plugin is loaded.
#define OMNI_MODULE_ON_MODULE_CAN_UNLOAD(exp_, fn_) OMNI_RETURN_IF_FAILED(exp_->addOnModuleCanUnload(fn_))

//! Called when the module is about to be unloaded.
//!
//! This function is called after @ref OnModuleCanUnloadFn returns `true`.
//!
//! The module is expected to clean-up any external references to code within the module.  For example, unregistering
//! asset types.
//!
//! Any registered implementations from this module will have already been unregistered by the time this function is
//! called.
//!
//! This function must never fail.
//!
//! It is safe to access the owning @ref omni::core::ITypeFactory.
//!
//! Attempting to load the module within @ref OnModuleLoadFn may result in deadlock.  It is safe for other threads to
//! attempt the load the module during @ref OnModuleUnloadFn, however, it is not safe for @ref OnModuleUnloadFn to
//! attempt to load the module.
//!
//! No other module functions will be called while this function is active.
//!
//! @ref omni::core::ITypeFactory implements the following unload pseudo-code:
//!
//! @code{.cpp}
//!     if (module->canUnload()) {
//!         factory->unregisterModuleTypes(module);
//!         module->onUnload();
//!         os->unloadDll(module);
//!     }
//! @endcode
using OnModuleUnloadFn = void();

//! @ref omni::core::ModuleExports entry to register a function to be called when the plugin is unloaded.
//!
//! Use the helper @ref OMNI_MODULE_ON_MODULE_UNLOAD to add this entry.
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryOnModuleUnload)
OnModuleUnloadFn* onModuleUnload; //!< Module function to call to clean-up the module during unload.
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryOnModuleUnload)

//! Registers the plugin's function who is responsible for cleaning up the plugin when the plugin is being unloaded.
//!
//! @param exp_ The @ref omni::core::ModuleExports table in which the entry should be added.
//!
//! @param fn_ The plugin's @ref omni::core::OnModuleUnloadFn who is responsible for cleaning up the plugin.
#define OMNI_MODULE_ON_MODULE_UNLOAD(exp_, fn_) OMNI_RETURN_IF_FAILED(exp_->addOnModuleUnload(fn_))

//! Forward declaration for omni::core::ITypeFactory.
OMNI_DECLARE_INTERFACE(ITypeFactory)

//! @ref omni::core::ModuleExports entry to access @ref omni::core::ITypeFactory.
//!
//! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry.
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryITypeFactory)
ITypeFactory** typeFactory; //!< Pointer to the module's type factory pointer.
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryITypeFactory)

//! @ref omni::core::ModuleExports entry to access @ref omni::log::ILog.
//!
//! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry.
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryILog)
log::ILog** log; //!< Pointer to the module's log pointer.
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryILog)

//! @ref omni::core::ModuleExports entry to add a logging channel.
//!
//! Use the helper @ref OMNI_MODULE_ADD_LOG_CHANNEL to add this entry.
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryLogChannel)
const char* name; //!< Name of the channel.
int32_t* level; //!< Pointer to module memory where the channel's logging level is stored.
const char* description; //!< Description of the channel (for humans).
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryLogChannel)

//! Adds a log channel to the logging system.  The channel will be removed when the module is unloaded.
//!
//! @p name_ and @p level_ must not be `nullptr`.
//!
//! The given pointers must remain valid for the lifetime of the module.
//!
//! Rather than calling this macro, use @ref OMNI_LOG_ADD_CHANNEL to both declare a channel and add it to the @ref
//! omni::core::ModuleExports table.
//!
//! @param exp_ The @ref omni::core::ModuleExports table in which the entry should be added.
//!
//! @param name_ The name of the channel.  Must not be `nullptr`.
//!
//!
@param level_ Pointer to plugin memory where the logging system can store the channel's logging threshold. Shouldn't //! be `nullptr`. //! //! @param description_ Description of the channel. Useful for debugging and UIs. Must not be `nullptr`. #define OMNI_MODULE_ADD_LOG_CHANNEL(exp_, name_, level_, description_) \ OMNI_RETURN_IF_FAILED(exp_->addLogChannel(name_, level_, description_)) //! @ref omni::core::ModuleExports entry to interop with @ref g_carbClientName. //! //! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry. OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryCarbClientName) const char* clientName; //!< The client name OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryCarbClientName) //! Requires that the owning @ref omni::core::ITypeFactory provides a Carbonite client name: @ref g_carbClientName. //! //! By default, the owning @ref omni::core::ITypeFactory will try to populate the module's @ref g_carbClientName, but //! will silently fail if it cannot. This macro tells the @ref omni::core::ITypeFactory to fail the module's load. #define OMNI_MODULE_REQUIRE_CARB_CLIENT_NAME(exp_) \ OMNI_RETURN_IF_FAILED(out->requireExport(omni::core::kModuleExportEntryTypeCarbClientName)) //! @ref omni::core::ModuleExports entry to access @ref omni::structuredlog::IStructuredLog. //! //! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry. OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryIStructuredLog) omni::structuredlog::IStructuredLog** structuredLog; //!< Pointer to module structured log pointer. OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryIStructuredLog) //! @ref omni::core::ModuleExports entry to add a new structured logging schema to be registered. //! //! Use the helper @ref OMNI_MODULE_ADD_STRUCTURED_LOG_SCHEMA() to add this entry. OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntrySchema) omni::structuredlog::SchemaAddFn schemaAddFn; //!< the schema registration function to run after core startup. 
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntrySchema) //! adds a new schema to be registered after core startup. //! //! @p fn_ must not be `nullptr`. //! //! This does not need to be called directly. All the schemas included in a module will be //! implicitly added by @ref OMNI_MODULE_SET_EXPORTS(). //! //! @param exp_ The @ref omni::core::ModuleExports table in which the schema should be added. //! @param fn_ The schema registration function that will be stored. This will be executed //! once the core's startup has completed. This must not be nullptr. #define OMNI_MODULE_ADD_STRUCTURED_LOG_SCHEMA(exp_, fn_) OMNI_RETURN_IF_FAILED(exp_->addStructuredLogSchema(fn_)) } // namespace core } // namespace omni namespace carb { struct Framework; namespace assert { struct IAssert; } // namespace assert namespace logging { struct ILogging; } // namespace logging namespace profiler { struct IProfiler; } // namespace profiler namespace l10n { struct IL10n; struct LanguageTable; struct LanguageIdentifier; } // namespace l10n } // namespace carb namespace omni { namespace core { #ifndef DOXYGEN_BUILD namespace detail { //! Carbonite logging callback. using CarbLogFn = void (*)(const char* source, int32_t level, const char* fileName, const char* functionName, int lineNumber, const char* fmt, ...); //! Carbonite logging threshold callback. using CarbLogLevelFn = void(int32_t); //! Carbonite localization callback. using CarbLocalizeStringFn = const char*(CARB_ABI*)(const carb::l10n::LanguageTable* table, uint64_t id, const carb::l10n::LanguageIdentifier* language); } // namespace detail #endif //! @ref omni::core::ModuleExports entry to interop with @ref carb::Framework. //! //! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry. OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryCarbFramework) carb::Framework** framework; //!< Pointer to the module's @ref g_carbFramework pointer. carb::Version version; //!< Version of the @ref carb::Framework the module expects. 
OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryCarbFramework) //! Requires that the owning @ref omni::core::ITypeFactory provides a Carbonite @ref carb::Framework @ref //! g_carbFramework. //! //! By default, the owning @ref omni::core::ITypeFactory will try to populate the module's @ref g_carbFramework, but //! will silently fail if it cannot. This macro tells the @ref omni::core::ITypeFactory to fail the module's load. #define OMNI_MODULE_REQUIRE_CARB_FRAMEWORK(exp_) \ OMNI_RETURN_IF_FAILED(out->requireExport(omni::core::kModuleExportEntryTypeCarbFramework)) //! @ref omni::core::ModuleExports entry to interop with @ref carb::assert::IAssert. //! //! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry. OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryCarbIAssert) carb::assert::IAssert** assert; //!< Pointer to the module's @ref g_carbAssert pointer. carb::InterfaceDesc interfaceDesc; //!< Required version of @ref carb::assert::IAssert. OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryCarbIAssert) //! Requires that the owning @ref omni::core::ITypeFactory provides a Carbonite @ref carb::assert::IAssert @ref //! g_carbAssert. //! //! By default, the owning @ref omni::core::ITypeFactory will try to populate the module's @ref g_carbAssert, but will //! silently fail if it cannot. This macro tells the @ref omni::core::ITypeFactory to fail the module's load. #define OMNI_MODULE_REQUIRE_CARB_IASSERT(exp_) \ OMNI_RETURN_IF_FAILED(out->requireExport(omni::core::kModuleExportEntryTypeCarbIAssert)) //! @ref omni::core::ModuleExports entry to interop with @ref carb::logging::ILogging. //! //! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry. OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryCarbILogging) carb::logging::ILogging** logging; //!< Pointer to the module's @ref g_carbLogging pointer. detail::CarbLogFn* logFn; //!< Pointer to the module's @ref g_carbLogFn function pointer. 
detail::CarbLogLevelFn* logLevelFn; //!< Pointer to a module function which can set the log level. int32_t* logLevel; //!< Pointer to module memory where the logging threshold is stored. carb::InterfaceDesc interfaceDesc; //!< Required version of @ref carb::logging::ILogging. OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryCarbILogging) //! Requires that the owning @ref omni::core::ITypeFactory provides a Carbonite @ref carb::logging::ILogging @ref //! g_carbLogging. //! //! By default, the owning @ref omni::core::ITypeFactory will try to populate the module's @ref g_carbLogging, but will //! silently fail if it cannot. This macro tells the @ref omni::core::ITypeFactory to fail the module's load. #define OMNI_MODULE_REQUIRE_CARB_ILOGGING(exp_) \ OMNI_RETURN_IF_FAILED(out->requireExport(omni::core::kModuleExportEntryTypeCarbILogging)) //! @ref omni::core::ModuleExports entry to interop with @ref carb::profiler::IProfiler. //! //! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry. OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryCarbIProfiler) carb::profiler::IProfiler** profiler; //!< Pointer to the module's @ref g_carbProfiler. carb::InterfaceDesc interfaceDesc; //!< Required version of @ref carb::profiler::IProfiler. OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryCarbIProfiler) //! Requires that the owning @ref omni::core::ITypeFactory provides a Carbonite @ref carb::profiler::IProfiler @ref //! g_carbProfiler. //! //! By default, the owning @ref omni::core::ITypeFactory will try to populate the module's @ref g_carbProfiler, but will //! silently fail if it cannot. This macro tells the @ref omni::core::ITypeFactory to fail the module's load. #define OMNI_MODULE_REQUIRE_CARB_IPROFILER(exp_) \ OMNI_RETURN_IF_FAILED(out->requireExport(omni::core::kModuleExportEntryTypeCarbIProfiler)) //! @ref omni::core::ModuleExports entry to interop with @ref carb::l10n::IL10n. //! //! Use the helper @ref OMNI_MODULE_SET_EXPORTS to add this entry. 
OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryCarbIL10n) carb::l10n::IL10n** localization; //!< Pointer to the module's @ref g_carbLocalization. detail::CarbLocalizeStringFn* localizationFn; //!< Pointer to the module's @ref g_localizationFn function pointer. carb::InterfaceDesc interfaceDesc; //!< Required version of @ref carb::l10n::IL10n. OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryCarbIL10n) //! Called to get dependencies from the module. using GetModuleDependenciesFn = Result(carb::InterfaceDesc** out, size_t* outCount); //! @ref omni::core::ModuleExports entry to register a function to advertise the interface implementations available in //! the plugin. //! //! Use the helper @ref OMNI_MODULE_GET_MODULE_DEPENDENCIES to add this entry. OMNI_MODULE_EXPORT_ENTRY_BEGIN(ModuleExportEntryGetModuleDependencies) GetModuleDependenciesFn* getModuleDependencies; //!< Module's dependencies information function. OMNI_MODULE_EXPORT_ENTRY_END(ModuleExportEntryGetModuleDependencies) //! Registers the function responsible for advertising the plugin's interface dependencies. //! //! @param exp_ The @ref omni::core::ModuleExports table in which the entry should be added. //! //! @param fn_ The plugin's @ref omni::core::GetModuleDependenciesFn responsible for advertising //! plugin's interface dependencies. #define OMNI_MODULE_GET_MODULE_DEPENDENCIES(exp_, fn_) OMNI_RETURN_IF_FAILED(exp_->addGetModuleDependencies(fn_)) //! Requires that the owning @ref omni::core::ITypeFactory provides a Carbonite @ref carb::l10n::IL10n @ref //! g_carbLocalization. //! //! By default, the owning @ref omni::core::ITypeFactory will try to populate the module's @ref g_carbLocalization, but //! will silently fail if it cannot. This macro tells the @ref omni::core::ITypeFactory to fail the module's load. #define OMNI_MODULE_REQUIRE_CARB_IL10N(exp_) \ OMNI_RETURN_IF_FAILED(out->requireExport(omni::core::kModuleExportEntryTypeCarbIL10n)) //! 
Magic number for sanity checking of @ref omni::core::ModuleExports.
constexpr uint16_t kModuleExportsMagic = 0x766e; // { 'n', 'v' }

//! Binary layout of @ref omni::core::ModuleExports. This should be incremented if the fields in @ref
//! omni::core::ModuleExports change.
//!
//! Great care must be taken when changing this version, as it may prevent existing modules from loading without
//! recompilation.
constexpr uint16_t kModuleExportsVersion = 1;

//! Entities exported by a module for both use and population by @ref omni::core::ITypeFactory.
//!
//! Rather than a fixed data structure to communicate which functions a DLL exports, Omniverse modules use a data driven
//! approach to convey both what functionality the module brings to the table and the needs of the module in order to
//! operate correctly.
//!
//! The data members in this structure, while public, should be treated as opaque. Hiding the data members (i.e. making
//! them private) would violate C++11's "standard layout" requirements, thus making this struct not
//! @rstref{ABI safe <abi-compatibility>}.
//!
//! Avoid calling methods of this struct directly. Rather call the helper macros. For example, call @ref
//! OMNI_MODULE_ON_MODULE_LOAD() rather than @ref ModuleExports::addOnModuleLoad(). Calling the former allows future
//! implementations leeway to make your awesome-futuristic interface compatible with older <i>carb.dll</i>'s.
//!
//! Unless otherwise noted, pointers provided to this object are expected to be valid for the lifetime of the module.
//!
//!@see @oni_overview for an overview of plugin loading (explicit module loading).
struct ModuleExports
{
    //! Magic number. Used for sanity checking. Should be kModuleExportsMagic.
    uint16_t magic;

    //! Version of this structure. Changing this will break most modules.
    //!
    //! Version 1 of this structure defines a key/value database of module capabilities and requirements.
    //!
    //! Adding or removing a key from this database does not warrant a version bump. Rather a version bump is required
    //! if:
    //!
    //! - Any field in this struct change its meaning.
    //! - Fields are removed from this struct (hint: never remove a field, rather, deprecate it).
    //!
    //! The "keys" in the key/value pairs are designed such that a "key" has a known value. A "key"'s meaning can never
    //! change. If a change is desired, a new key is created.
    uint16_t version;

    //! Size of this structure. Here the size is `sizeof(ModuleExports)` + any extra space allocated at the end of this
    //! struct for @ref ModuleExportEntry's.
    uint32_t byteCount;

    //! Pointer to the first byte of the first @ref ModuleExportEntry.
    uint8_t* exportsBegin;

    //! Pointer to the byte after the end of the last @ref ModuleExportEntry. The module is expected to update this
    //! field.
    uint8_t* exportsEnd;

    //! Returns @ref kResultSuccess if the given version is supported, an error otherwise.
    //!
    //! This method is called from the module.
    //!
    //! On return, @ref magic and @ref version hold the *module's* values, and the caller's arguments hold the values
    //! this struct was expecting (swapped so the factory can report a useful mismatch message).
    Result checkVersion(uint16_t moduleMagic, uint16_t moduleVersion)
    {
        // we can't log here, since we're too early in the module load process for logging to be available. pass back
        // the magic number and version we were expecting so omni::core::ITypeFactory can print an appropriate message
        // if the checks below fail.
        std::swap(magic, moduleMagic);
        std::swap(version, moduleVersion);

        if (magic != moduleMagic)
        {
            return kResultVersionParseError;
        }

        if (version != moduleVersion)
        {
            return kResultVersionCheckFailure;
        }

        return kResultSuccess;
    }

    //! Adds the given export entry. Returns @ref kResultInsufficientBuffer (not `false`) if there is not enough space.
    Result add(const ModuleExportEntry* entry)
    {
        // byteCount covers sizeof(ModuleExports) plus the entry area, so the header size is included in neededSize.
        uint32_t neededSize = uint32_t(exportsEnd - exportsBegin) + sizeof(ModuleExports) + entry->byteCount;
        if (neededSize > byteCount)
        {
            return kResultInsufficientBuffer;
        }

        std::memcpy(exportsEnd, entry, entry->byteCount);
        exportsEnd += entry->byteCount;

        return kResultSuccess;
    }

    //! Returns a pointer to the first entry of the given type (linear scan over the entry buffer). Return `nullptr` if
    //! no such entry exists.
    ModuleExportEntry* find(const char* type)
    {
        if (!type)
        {
            return nullptr;
        }

        uint8_t* p = exportsBegin;
        while (p < exportsEnd)
        {
            auto entry = reinterpret_cast<ModuleExportEntry*>(p);
            if (0 == strcmp(type, entry->type))
            {
                return entry;
            }

            p += entry->byteCount;
        }

        return nullptr;
    }

    //! Finds the first entry of the given type and sets it as "required". Returns an error if no such entry could be
    //! found.
    Result requireExport(const char* type)
    {
        auto entry = find(type);
        if (entry)
        {
            entry->flags |= fModuleExportEntryFlagRequired;
            return kResultSuccess;
        }

        return kResultNotFound;
    }

    //! See @ref OMNI_MODULE_ON_MODULE_LOAD.
    Result addOnModuleLoad(OnModuleLoadFn* fn, ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryOnModuleLoad entry{ kModuleExportEntryTypeOnModuleLoad, flags };
        entry.onModuleLoad = fn;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_ON_MODULE_STARTED.
    Result addOnModuleStarted(OnModuleStartedFn* fn, ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryOnModuleStarted entry{ kModuleExportEntryTypeOnModuleStarted, flags };
        entry.onModuleStarted = fn;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_ON_MODULE_CAN_UNLOAD.
    Result addOnModuleCanUnload(OnModuleCanUnloadFn* fn, ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryOnModuleCanUnload entry{ kModuleExportEntryTypeOnModuleCanUnload, flags };
        entry.onModuleCanUnload = fn;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_ON_MODULE_UNLOAD.
    Result addOnModuleUnload(OnModuleUnloadFn* fn, ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryOnModuleUnload entry{ kModuleExportEntryTypeOnModuleUnload, flags };
        entry.onModuleUnload = fn;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addITypeFactory(ITypeFactory** typeFactory, ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryITypeFactory entry{ kModuleExportEntryTypeITypeFactory, flags };
        entry.typeFactory = typeFactory;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addILog(log::ILog** log, ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryILog entry{ kModuleExportEntryTypeILog, flags };
        entry.log = log;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_LOG_ADD_CHANNEL.
    Result addLogChannel(const char* channelName,
                         int32_t* level,
                         const char* description,
                         ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryLogChannel entry{ kModuleExportEntryTypeLogChannel, flags };
        entry.name = channelName;
        entry.level = level;
        entry.description = description;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addIStructuredLog(omni::structuredlog::IStructuredLog** strucLog,
                             ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryIStructuredLog entry{ kModuleExportEntryTypeIStructuredLog, flags };
        entry.structuredLog = strucLog;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_ADD_STRUCTURED_LOG_SCHEMA().
    Result addStructuredLogSchema(omni::structuredlog::SchemaAddFn fn,
                                  ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntrySchema entry{ kModuleExportEntryTypeSchema, flags };
        entry.schemaAddFn = fn;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addCarbClientName(const char* clientName, ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryCarbClientName entry{ kModuleExportEntryTypeCarbClientName, flags };
        entry.clientName = clientName;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addCarbFramework(carb::Framework** carbFramework,
                            const carb::Version& ver,
                            ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryCarbFramework entry{ kModuleExportEntryTypeCarbFramework, flags };
        entry.framework = carbFramework;
        entry.version = ver;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addCarbIAssert(carb::assert::IAssert** assert,
                          const carb::InterfaceDesc& interfaceDesc,
                          ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryCarbIAssert entry{ kModuleExportEntryTypeCarbIAssert, flags };
        entry.assert = assert;
        entry.interfaceDesc = interfaceDesc;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addCarbILogging(carb::logging::ILogging** logging,
                           detail::CarbLogFn* logFn,
                           detail::CarbLogLevelFn* logLevelFn,
                           int32_t* logLevel,
                           const carb::InterfaceDesc& interfaceDesc,
                           ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryCarbILogging entry{ kModuleExportEntryTypeCarbILogging, flags };
        entry.logging = logging;
        entry.logFn = logFn;
        entry.logLevelFn = logLevelFn;
        entry.logLevel = logLevel;
        entry.interfaceDesc = interfaceDesc;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addCarbIProfiler(carb::profiler::IProfiler** profiler,
                            const carb::InterfaceDesc& interfaceDesc,
                            ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryCarbIProfiler entry{ kModuleExportEntryTypeCarbIProfiler, flags };
        entry.profiler = profiler;
        entry.interfaceDesc = interfaceDesc;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_SET_EXPORTS.
    Result addCarbIL10n(carb::l10n::IL10n** localization,
                        detail::CarbLocalizeStringFn* localizationFn,
                        const carb::InterfaceDesc& interfaceDesc,
                        ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryCarbIL10n entry{ kModuleExportEntryTypeCarbIL10n, flags };
        entry.localization = localization;
        entry.localizationFn = localizationFn;
        entry.interfaceDesc = interfaceDesc;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }

    //! See @ref OMNI_MODULE_GET_MODULE_DEPENDENCIES.
    Result addGetModuleDependencies(GetModuleDependenciesFn* fn, ModuleExportEntryFlag flags = fModuleExportEntryFlagNone)
    {
        ModuleExportEntryGetModuleDependencies entry{ kModuleExportEntryTypeGetModuleDependencies, flags };
        entry.getModuleDependencies = fn;
        return add(reinterpret_cast<ModuleExportEntry*>(&entry));
    }
};

CARB_ASSERT_INTEROP_SAFE(ModuleExports);

static_assert(sizeof(ModuleExports) == (8 + (2 * sizeof(void*))),
              "unexpected ModuleExports size. do not change ModuleExports?");

//! Type of @ref kModuleGetExportsName. See @ref omniModuleGetExports.
using ModuleGetExportsFn = Result(ModuleExports* out);

//! Name of the module's exported function that is of type @ref omni::core::ModuleGetExportsFn. See @ref
//! omniModuleGetExports.
constexpr const char* const kModuleGetExportsName = "omniModuleGetExports";

} // namespace core
} // namespace omni

#ifdef DOXYGEN_BUILD
//! @brief Main entry point into a module. Returns the list of capabilities and requirements for the module.
//!
//! This is the first function called in a module by @ref omni::core::ITypeFactory. Information is passed between the
//! module and the @ref omni::core::ITypeFactory via @ref omni::core::ModuleExports.
//!
//! @ref omni::core::ModuleExports is an @rstref{ABI-safe <abi-compatibility>} table used to store the module's
//! requirements. For example, the module may require that the Carbonite @ref carb::Framework is present.
//!
//! This table is also used to communicate capabilities of the module. For example, the module is able to denote which
//! logging channels it wishes to register.
//!
//! See @oni_overview for more details on how this function is used.
omni::core::Result omniModuleGetExports(omni::core::ModuleExports* out);
#endif
omniverse-code/kit/include/omni/core/ITypeFactory.gen.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/OmniAttr.h>
#include <omni/core/Interface.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <utility>
#include <type_traits>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! A mapping from type id's to implementations.
//!
//! This object maps type id's to concrete implementations. The type id's can represent interface ids or implementation
//! ids.
//!
//! Register types with registerInterfaceImplementationsFromModule() and registerInterfaceImplementations().
//!
//! Instantiate types with omni::core::createType(). This is the primary way Omniverse applications are able to
//! instantiate concrete implementations of @rstref{ABI-safe <abi-compatibility>} interfaces. See
//! omni::core::createType() for a helpful wrapper around omni::core::ITypeFactory::createType().
//!
//! In practice, there will be a single ITypeFactory active in the process space (accessible via
//! omniGetTypeFactoryWithoutAcquire()). However, @ref omni::core::ITypeFactory is not inherently a singleton, and as
//! such multiple instantiations of the interface may exist. This can be used to create private type trees.
//!
//! Unless otherwise noted, all methods in this interface are thread safe.
template <>
class omni::core::Generated<omni::core::ITypeFactory_abi> : public omni::core::ITypeFactory_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::core::ITypeFactory")

    //! Instantiates a concrete type.
    //!
    //! The given type id can be an interface or implementation id.
    //!
    //! If the id is an interface id, the following rules are followed:
    //!
    //! - If the application specified a default implementation, that implementation will be instantiated.
    //!
    //! - Otherwise, the first registered implementation of the interface is instantiated. If multiple versions of the
    //!   implementation exist, the highest version is picked.
    //!
    //! - implVersion must be 0 since interfaces are not versioned (only implementations are versioned). If implVersion
    //!   is not 0, nullptr is returned.
    //!
    //! - If a default module name was provided by the app, the rules above will only be applied to implementations from
    //!   the specified default module.
    //!
    //! If the id is an implementation id, the followings rules apply:
    //!
    //! - If version is 0, the highest version of the implementation is returned.
    //!
    //! - If version is not 0, the returned object is the specified version of the implementation. If such a version
    //!   does not exists, nullptr is returned. If multiple implementations exists with the same version, the
    //!   implementation registered first is instantiated.
    //!
    //! In both cases above, if moduleName given, the rules above are followed by only looking at implementations from
    //! the specified module. If no match is found, nullptr is returned.
    //!
    //! If moduleName has not been loaded, it will be loaded and its implementations registered.
    //!
    //! If moduleName is nullptr, the rules above are applied across all loaded modules.
    //!
    //! This method is thread safe.
    omni::core::ObjectPtr<omni::core::IObject> createType(omni::core::TypeId id,
                                                          const char* moduleName,
                                                          uint32_t implVersion) noexcept;

    //! Registers types from the given module.
    //!
    //! If the module is currently loaded, it will not be reloaded and kResultSuccess is returned.
    //!
    //! Modules (e.g. .dll or .so) may contain one or many implementations of one or many interfaces. When registering a
    //! module with the type factory, a function, whose name is described by 'kModuleGetExportsName', is found and
    //! invoked. Let's assume the exported function name is "omniModuleGetExports".
    //!
    //! "omniModuleGetExports" returns a key/value database of the module's capabilities and the module's requirements.
    //! Some things to note about this database:
    //!
    //! - The module's requirements can be marked as optional.
    //!
    //! - The module's capabilities can be ignored by ITypeFactory.
    //!
    //! These properties allow ITypeFactory and the module to find an intersection of desired functionality in a data
    //! driven manner. If one party's required needs are not met, the module fails to load (e.g. an appropriate
    //! omni::core::Result is returned).
    //!
    //! It is expected the module has entries in the key/value database describing the functions ITypeFactory should
    //! call during the loading process. The most important of these entries is the one defined by
    //! OMNI_MODULE_ON_MODULE_LOAD(), which points to the function ITypeFactory should call to get a list of
    //! implementations in the module. ITypeFactory invokes exports from the module in the following pattern:
    //!
    //! .--------------------------------------------------------------------------------------------------------------.
    //! |                                                -> Time ->                                                    |
    //! |--------------------------------------------------------------------------------------------------------------|
    //! | omniModuleGetExports | onLoad (req.) | onStarted (optional) | onCanUnload (optional) | onUnload (optional)   |
    //! |                      |               | impl1->createFn      |                        |                       |
    //! |                      |               | impl2->createFn      |                        |                       |
    //! |                      |               | impl1->createFn      |                        |                       |
    //! \--------------------------------------------------------------------------------------------------------------/
    //!
    //! Above, functions in the same column can be called concurrently. It's up to the module to make sure such call
    //! patterns are thread safe within the module.
    //!
    //! onCanUnload and createFn can be called multiple times. All other functions are called once during the lifecycle
    //! of a module.
    //!
    //! \see omni/core/ModuleExports.h.
    //! \see onModuleLoadFn
    //! \see onModuleStartedFn
    //! \see onModuleCanUnloadFn
    //! \see onModuleUnloadFn
    //!
    //!
    //! The module can be explicitly unloaded with unregisterInterfaceImplementationsFromModule().
    //!
    //! Upon destruction of this ITypeFactory, unregisterInterfaceImplementationsFromModule is called for each loaded
    //! module. If the ITypeFactory destructor's call to unregisterInterfaceImplementationsFromModule fails to safely
    //! unload a module (via the module's onModuleCanUnload and onModuleUnload), an attempt will be made to
    //! forcefully/unsafely unload the module.
    //!
    //! The given module name must not be nullptr.
    //!
    //! This method is thread safe. Modules can be loaded in parallel.
    //!
    //! \returns Returns kResultSuccess if the module is loaded (either due to this function or a previous call).
    //! Otherwise, an error is returned.
    omni::core::Result registerInterfaceImplementationsFromModule(const char* moduleName,
                                                                  omni::core::TypeFactoryLoadFlags flags) noexcept;

    //! Unregisters all types registered from the given module.
    //!
    //! Unregistering a module may fail if the module does not believe it can safely be unloaded. This is determined by
    //! OMNI_MODULE_ON_MODULE_CAN_UNLOAD().
    //!
    //! If unregistration does succeed, the given module will be unloaded from the process space.
    //!
    //! Upon destruction of this ITypeFactory, unregisterInterfaceImplementationsFromModule is called for each loaded
    //! module. If the ITypeFactory destructor's call to unregisterInterfaceImplementationsFromModule fails to safely
    //! unload a module (via the module's onModuleCanUnload and onModuleUnload), an attempt will be made to
    //! forcefully/unsafely unload the module.
    //!
    //! The given module name must not be nullptr.
    //!
    //! This method is thread safe.
    //!
    //! \returns Returns kResultSuccess if the module wasn't already loaded or if this method successfully unloaded the
    //! module. Return an error code otherwise.
    omni::core::Result unregisterInterfaceImplementationsFromModule(const char* moduleName) noexcept;

    //! Register the list of types.
    //!
    //! Needed data from the "implementations" list is copied by this method.
    //!
    //! This method is thread safe.
    void registerInterfaceImplementations(const omni::core::InterfaceImplementation* implementations,
                                          uint32_t implementationsCount,
                                          omni::core::TypeFactoryLoadFlags flags) noexcept;

    //! Maps a type id back to its type name.
    //!
    //! The memory returned is valid for the lifetime of ITypeFactory
    //!
    //! Returns nullptr if id has never been registered. Types that have been registered, and then unregistered, will
    //! still have a valid string returned from this method.
    //!
    //! This method is thread safe.
    const char* getTypeIdName(omni::core::TypeId id) noexcept;

    //! Sets the implementation matching constraints for the given interface id.
    //!
    //! See omni::core::ITypeFactory_abi::createType_abi() for how these constraints are used.
    //!
    //! moduleName can be nullptr.
    //!
    //! if implVersion is 0 and implId is an implementation id, the implementation with the highest version is chosen.
    //!
    //! This method is thread safe.
    void setInterfaceDefaults(omni::core::TypeId interfaceId,
                              omni::core::TypeId implId,
                              const char* moduleName,
                              uint32_t implVersion) noexcept;

    //! Returns the implementation matching constraints for the given interface id.
    //!
    //! See omni::core::ITypeFactory_abi::createType_abi() for how these constraints are used.
    //!
    //! If the given output implementation id pointer (outImplid) is not nullptr, it will be populated with the default
    //! implementation id instantiated when the interface requested to be created.
    //!
    //! If the given output implementation version pointer (outImplVersion) is not nullptr, it will be populated with
    //! the default implementation version instantiated when the interface is requested to be created.
    //!
    //! If the output module name pointer (outModuleName) is not nullptr, it will be populated with the name of the
    //! module searched when trying to find an implementation of the interface. If there is no current default module
    //! name, the output module name will be populated with the empty string. If the output module name's buffer size is
    //! insufficient to store the null terminated module name, kResultBufferInsufficient is returned and the module
    //! name's buffer size is updated with the needed buffer size.
    //!
    //! If the output module name is nullptr, the output module name buffer size (inOutModuleNameCount) will be
    //! populated with the size of the buffer needed to store the module name.
    //!
    //! The output module name buffer size pointer (inOutModuleNameCount) must not be nullptr.
    //!
    //! If the given interface id is not found, kResultNotFound is returned and the output implementation id (outImplId)
    //! and version (outImplVersion), if defined, are set to 0. Additionally, the output module name (outModuleName),
    //! if defined, is set to the empty string.
    //!
    //! If kResultInsufficientBuffer and kResultNotFound are both flagged internally, kResultNotFound is returned.
    //!
    //! See omni::core::getInterfaceDefaults() for a C++ wrapper to this method.
    //!
    //! This method is thread safe.
    omni::core::Result getInterfaceDefaults(omni::core::TypeId interfaceId,
                                            omni::core::TypeId* outImplId,
                                            char* outModuleName,
                                            uint32_t* inOutModuleNameCount,
                                            uint32_t* outImplVersion) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// The inline wrappers below simply forward to the raw _abi virtual methods; they are generated by omni.bind and
// should not be edited by hand.
inline omni::core::ObjectPtr<omni::core::IObject> omni::core::Generated<omni::core::ITypeFactory_abi>::createType(
    omni::core::TypeId id, const char* moduleName, uint32_t implVersion) noexcept
{
    return omni::core::steal(createType_abi(id, moduleName, implVersion));
}

inline omni::core::Result omni::core::Generated<omni::core::ITypeFactory_abi>::registerInterfaceImplementationsFromModule(
    const char* moduleName, omni::core::TypeFactoryLoadFlags flags) noexcept
{
    return registerInterfaceImplementationsFromModule_abi(moduleName, flags);
}

inline omni::core::Result omni::core::Generated<omni::core::ITypeFactory_abi>::unregisterInterfaceImplementationsFromModule(
    const char* moduleName) noexcept
{
    return unregisterInterfaceImplementationsFromModule_abi(moduleName);
}

inline void omni::core::Generated<omni::core::ITypeFactory_abi>::registerInterfaceImplementations(
    const omni::core::InterfaceImplementation* implementations,
    uint32_t implementationsCount,
    omni::core::TypeFactoryLoadFlags flags) noexcept
{
    registerInterfaceImplementations_abi(implementations, implementationsCount, flags);
}

inline const char* omni::core::Generated<omni::core::ITypeFactory_abi>::getTypeIdName(omni::core::TypeId id) noexcept
{
    return getTypeIdName_abi(id);
}

inline void omni::core::Generated<omni::core::ITypeFactory_abi>::setInterfaceDefaults(omni::core::TypeId interfaceId,
                                                                                      omni::core::TypeId implId,
                                                                                      const char* moduleName,
                                                                                      uint32_t implVersion) noexcept
{
    setInterfaceDefaults_abi(interfaceId, implId, moduleName, implVersion);
}

inline omni::core::Result omni::core::Generated<omni::core::ITypeFactory_abi>::getInterfaceDefaults(
    omni::core::TypeId interfaceId,
    omni::core::TypeId* outImplId,
    char* outModuleName,
    uint32_t* inOutModuleNameCount,
    uint32_t* outImplVersion) noexcept
{
    return getInterfaceDefaults_abi(interfaceId, outImplId, outModuleName, inOutModuleNameCount, outImplVersion);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL

static_assert(std::is_standard_layout<omni::core::InterfaceImplementation>::value,
              "omni::core::InterfaceImplementation must be standard layout to be used in ONI ABI");
omniverse-code/kit/include/omni/core/Omni.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Main header for the Omniverse core.
#pragma once

#include "../../carb/extras/Library.h"
#include "../../carb/PluginInitializers.h"
#include "Api.h"
#include "BuiltIn.h"
#include "ITypeFactory.h"
#include "../log/ILog.h"
#include "../structuredlog/IStructuredLog.h"

//! Returns the module's name (e.g. "c:/foo/omni-glfw.dll"). The pointer returned is valid for the lifetime of the
//! module.
//!
//! The returned path will be delimited by '/' on all platforms.
OMNI_API const char* omniGetModuleFilename();

//! Returns the module's directory name (e.g. "c:/foo" for "c:/foo/omni-glfw.dll"). The pointer returned is valid for
//! the lifetime of the module.
//!
//! The returned path will be delimited by '/' on all platforms.
OMNI_API const char* omniGetModuleDirectory();

#if CARB_PLATFORM_WINDOWS || defined(DOXYGEN_BUILD)
//! Defines global symbols intended to be used to statically analyze whether a given plugin
//! is a debug or release build. In a debug build, the `g_carbIsDebugConfig` symbol will
//! be present. In a release build, the `g_carbIsReleaseConfig` symbol will be present.
//! These symbols are not intended to be used at runtime, but rather to be able to determine
//! the build configuration without having to load up the module in a process first.
//!
//! These symbols are only present on non-Windows builds. They can be found with a command
//! line similar to this:
//! `nm <modulePath> | grep g_carbIsDebugConfig` or
//! `nm <modulePath> | grep g_carbIsReleaseConfig`
//!
//! On Windows, each module's "properties" window will list "(Debug)" in the "Product Name"
//! field of the "Details" tab for debug builds.
// (Windows/doxygen: expands to nothing -- the symbols only exist on other platforms.)
#    define OMNI_MODULE_GLOBALS_BUILD_CONFIG_SYMBOLS()
#else
#    if CARB_DEBUG
#        define OMNI_MODULE_GLOBALS_BUILD_CONFIG_SYMBOLS() CARB_HIDDEN int g_carbIsDebugConfig = 1
#    else
#        define OMNI_MODULE_GLOBALS_BUILD_CONFIG_SYMBOLS() CARB_HIDDEN int g_carbIsReleaseConfig = 1
#    endif
#endif

//! Defines functions centered around determining the current module's disk location.
//!
//! Internal macro to reduce code duplication. Do not directly use.
//
// NOTE(review): the addresses of the functions themselves are passed to
// carb::extras::getLibraryFilename()/getLibraryDirectory() so the lookup
// resolves to the module that *defines* them, not the caller's module.
#define OMNI_MODULE_DEFINE_LOCATION_FUNCTIONS()                                                                        \
    OMNI_API const char* omniGetModuleFilename()                                                                       \
    {                                                                                                                  \
        static std::string s_omniModuleFilename = carb::extras::getLibraryFilename((void*)(omniGetModuleFilename));    \
        return s_omniModuleFilename.c_str();                                                                           \
    }                                                                                                                  \
    OMNI_API const char* omniGetModuleDirectory()                                                                      \
    {                                                                                                                  \
        static std::string s_omniModuleDirectory = carb::extras::getLibraryDirectory((void*)(omniGetModuleDirectory)); \
        return s_omniModuleDirectory.c_str();                                                                          \
    }

//! Defines default implementations of global omni functions for a module.
//!
//! Internal macro to reduce code duplication. Do not directly use. Use @ref OMNI_MODULE_GLOBALS().
//
// NOTE(review): the anonymous-namespace s_omni* pointers declared here are the
// per-module built-in singletons; they are populated through the add*() calls
// made by OMNI_MODULE_SET_EXPORTS_WITHOUT_CARB() below and served back out via
// omniGetBuiltInWithoutAcquire().
#define OMNI_MODULE_DEFINE_OMNI_FUNCTIONS()                                                                            \
    namespace                                                                                                          \
    {                                                                                                                  \
        ::omni::core::ITypeFactory* s_omniTypeFactory = nullptr;                                                       \
        ::omni::log::ILog* s_omniLog = nullptr;                                                                        \
        ::omni::structuredlog::IStructuredLog* s_omniStructuredLog = nullptr;                                          \
    }                                                                                                                  \
    OMNI_MODULE_DEFINE_LOCATION_FUNCTIONS()                                                                            \
    OMNI_MODULE_GLOBALS_BUILD_CONFIG_SYMBOLS();                                                                        \
    OMNI_API void* omniGetBuiltInWithoutAcquire(OmniBuiltIn type)                                                      \
    {                                                                                                                  \
        switch (type)                                                                                                  \
        {                                                                                                              \
            case ::OmniBuiltIn::eITypeFactory:                                                                         \
                return s_omniTypeFactory;                                                                              \
            case ::OmniBuiltIn::eILog:                                                                                 \
                return s_omniLog;                                                                                      \
            case ::OmniBuiltIn::eIStructuredLog:                                                                       \
                return s_omniStructuredLog;                                                                            \
            default:                                                                                                   \
                return nullptr;                                                                                        \
        }                                                                                                              \
    }

//! Type of the `omniCarbStartup` function that is generated by \ref OMNI_MODULE_DEFINE_CARB_FUNCTIONS
using OmniCarbStartupFn = const char* (*)(carb::Framework*);

//! Type of the `omniCarbShutdown` function that is generated by \ref OMNI_MODULE_DEFINE_CARB_FUNCTIONS
using OmniCarbShutdownFn = void (*)();

//! Defines default implementations of global Carbonite functions for an Omni module.
//!
//! Internal macro to reduce code duplication. Do not directly use. Use @ref OMNI_MODULE_GLOBALS().
//
// NOTE(review): omniCarbStartup stores the host's Framework pointer in the
// module-global g_carbFramework and returns the module's client name so the
// loader can identify it.
#define OMNI_MODULE_DEFINE_CARB_FUNCTIONS()                                                                            \
    OMNI_EXPORT const char* omniCarbStartup(carb::Framework* framework)                                                \
    {                                                                                                                  \
        g_carbFramework = framework;                                                                                   \
        carb::pluginInitialize();                                                                                      \
        return g_carbClientName;                                                                                       \
    }                                                                                                                  \
    OMNI_EXPORT void omniCarbShutdown()                                                                                \
    {                                                                                                                  \
        carb::pluginDeinitialize();                                                                                    \
    }

//! Implementation detail. Do not directly use. Use @ref OMNI_GLOBALS_ADD_DEFAULT_CHANNEL.
#define OMNI_GLOBALS_ADD_DEFAULT_CHANNEL_1(chan_, name_, desc_) OMNI_LOG_ADD_CHANNEL(chan_, name_, desc_)

//! Adds the @p name_ as the default logging channel.
//!
//! It's unlikely user code will have to use this, as code like @ref CARB_GLOBALS_EX call this macro on behalf of most
//! clients.
#define OMNI_GLOBALS_ADD_DEFAULT_CHANNEL(name_, desc_)                                                                 \
    OMNI_GLOBALS_ADD_DEFAULT_CHANNEL_1(OMNI_LOG_DEFAULT_CHANNEL, name_, desc_)

//! Helper macro to declare globals needed by modules (i.e. plugins).
//!
//! Use with @ref OMNI_MODULE_SET_EXPORTS_WITHOUT_CARB().
//!
//! This macro is like @ref OMNI_MODULE_GLOBALS() but disables the interop between Carbonite interfaces and ONI. This
//! macro is useful if you never plan on accessing Carbonite interfaces within your module.
#define OMNI_MODULE_GLOBALS_WITHOUT_CARB(name_, desc_)                                                                 \
    OMNI_MODULE_DEFINE_OMNI_FUNCTIONS()                                                                                \
    OMNI_GLOBALS_ADD_DEFAULT_CHANNEL(name_, desc_)

//! Helper macro to declare globals needed by modules (i.e. plugins).
//!
//! Use with @ref OMNI_MODULE_SET_EXPORTS().
#define OMNI_MODULE_GLOBALS(name_, desc_)                                                                              \
    OMNI_MODULE_DEFINE_OMNI_FUNCTIONS()                                                                                \
    OMNI_MODULE_DEFINE_CARB_FUNCTIONS()                                                                                \
    CARB_GLOBALS_EX(name_, desc_)

//! Helper macro to set known export fields in @ref omniModuleGetExports().
//!
//! Use this macro in conjunction with @ref OMNI_MODULE_GLOBALS_WITHOUT_CARB().
//
// NOTE(review): checkVersion() must succeed before anything else is exported;
// the two loops forward this module's statically registered log channels and
// structured-log schemas to the host.
#define OMNI_MODULE_SET_EXPORTS_WITHOUT_CARB(out_)                                                                     \
    do                                                                                                                 \
    {                                                                                                                  \
        OMNI_RETURN_IF_FAILED(out_->checkVersion(omni::core::kModuleExportsMagic, omni::core::kModuleExportsVersion)); \
        OMNI_RETURN_IF_FAILED(out_->addITypeFactory(&s_omniTypeFactory));                                              \
        OMNI_RETURN_IF_FAILED(out_->addILog(&s_omniLog));                                                              \
        OMNI_RETURN_IF_FAILED(out_->addIStructuredLog(&s_omniStructuredLog));                                          \
        for (auto channel = omni::log::getModuleLogChannels(); channel; channel = channel->next)                       \
        {                                                                                                              \
            OMNI_MODULE_ADD_LOG_CHANNEL(out_, channel->name, &channel->level, channel->description);                   \
        }                                                                                                              \
        for (auto& schema : omni::structuredlog::getModuleSchemas())                                                   \
        {                                                                                                              \
            OMNI_MODULE_ADD_STRUCTURED_LOG_SCHEMA(out_, schema);                                                       \
        }                                                                                                              \
    } while (0)

//! Helper macro to set known export fields in @ref omniModuleGetExports().
//!
//! Use this macro in conjunction with @ref OMNI_MODULE_GLOBALS().
#define OMNI_MODULE_SET_EXPORTS(out_)                                                                                  \
    OMNI_MODULE_SET_EXPORTS_WITHOUT_CARB(out_);                                                                        \
    OMNI_MODULE_SET_CARB_EXPORTS(out_)

//! Helper macro to set known export fields in @ref omniModuleGetExports() related to Carbonite.
//!
//! Internal macro to reduce code duplication. Do not directly use. Use @ref OMNI_MODULE_SET_EXPORTS().
#define OMNI_MODULE_SET_CARB_EXPORTS(out_)                                                                             \
    OMNI_RETURN_IF_FAILED(out_->addCarbClientName(g_carbClientName));                                                  \
    OMNI_RETURN_IF_FAILED(out_->addCarbFramework(&g_carbFramework, carb::kFrameworkVersion));                          \
    OMNI_RETURN_IF_FAILED(out_->addCarbIAssert(&g_carbAssert, carb::assert::IAssert::getInterfaceDesc()));             \
    OMNI_RETURN_IF_FAILED(out_->addCarbILogging(&g_carbLogging, &g_carbLogFn,                                          \
                                                [](int32_t logLevel) { g_carbLogLevel = logLevel; }, &g_carbLogLevel,  \
                                                carb::logging::ILogging::getInterfaceDesc()));                         \
    OMNI_RETURN_IF_FAILED(out_->addCarbIProfiler(&g_carbProfiler, carb::profiler::IProfiler::getInterfaceDesc()));     \
    OMNI_RETURN_IF_FAILED(                                                                                             \
        out_->addCarbIL10n(&g_carbLocalization, &g_localizationFn, carb::l10n::IL10n::getInterfaceDesc()))

//! Helper macro to declare globals needed by the omni library when using the omni library in an application.
//!
//! \note Either this macro, or \ref CARB_GLOBALS, or \ref CARB_GLOBALS_EX must be specified in the global namespace
//! in exactly one compilation unit for a Carbonite Application.
//!
//! See @ref OMNI_CORE_INIT().
//! @param clientName The name of the client application. Must be unique with respect to any plugins loaded. Also is the
//! name of the default log channel.
//! @param clientDescription A description to use for the default log channel.
#define OMNI_APP_GLOBALS(clientName, clientDescription) CARB_GLOBALS_EX(clientName, clientDescription)

//! Helper macro to startup the Carbonite framework and Omni type factory.
//!
//! See @ref OMNI_CORE_INIT().
#define OMNI_CORE_START(args_)                                                                                         \
    omniCoreStart(args_);                                                                                              \
    omni::log::addModulesChannels();                                                                                   \
    omni::structuredlog::addModulesSchemas();                                                                          \
    carb::detail::registerAtexitHandler()

//! Helper macro to shutdown the Carbonite framework and Omni type factory.
//!
//! See @ref OMNI_CORE_INIT().
#define OMNI_CORE_STOP()                                                                                               \
    omni::log::removeModulesChannels();                                                                                \
    omniCoreStop()

//! Helper macro to shutdown the Carbonite framework and Omni type factory, for script bindings.
//!
//!
//! See @ref OMNI_CORE_INIT().
#define OMNI_CORE_STOP_FOR_BINDINGS()                                                                                  \
    omni::log::removeModulesChannels();                                                                                \
    omniCoreStopForBindings()

//! Version of @ref OmniCoreStartArgs struct passed to @ref omniCoreStart.
//!
//! The version should be incremented only when removing/rearranging fields in @ref OmniCoreStartArgs. Adding fields
//! that default to `nullptr` or `0` (from the reserved space) is allowed without incrementing the version.
constexpr uint16_t kOmniCoreStartArgsVersion = 1;

//! Base type for the Omni core startup flags.
using OmniCoreStartFlags = uint32_t;

//! Flag to indicate that ILog usage should be disabled on startup instead of creating the
//! internal version or expecting the caller to provide an implementation of the ILog
//! interface that has already been instantiated.
constexpr OmniCoreStartFlags fStartFlagDisableILog = 0x00000001;

//! Flag to indicate that IStructuredLog usage should be disabled on startup instead of
//! creating the internal version or expecting the caller to provide an implementation
//! of the IStructuredLog interface that has already been instantiated.
constexpr OmniCoreStartFlags fStartFlagDisableIStructuredLog = 0x00000002;

//! Arguments passed to omniCoreStart().
class OmniCoreStartArgs
{
public:
    //! Version of this structure. The version should be incremented only when removing/rearranging fields. Adding
    //! fields (from the reserved space) is allowed without incrementing the version.
    //!
    //! This field's value should always be set to @ref kOmniCoreStartArgsVersion.
    uint16_t version;

    //! Size of this structure.
    uint16_t byteCount;

    //! flags to control the behavior of the Omni core startup.
    OmniCoreStartFlags flags;

    //! The type factory that will be returned by omniGetTypeFactoryWithoutAcquire().
    //!
    //! omniCoreStart will call acquire() on the given type factory.
    //!
    //! If the given parameter is nullptr, omniCreateTypeFactory() will be called.
    omni::core::ITypeFactory* typeFactory;

    //! The log that will be returned by omniGetLogWithoutAcquire().
    //!
    //! omniCoreStart will call acquire() on the given log.
    //!
    //! If the given parameter is nullptr, omniCreateLog() will be called.
    omni::log::ILog* log;

    //! The structured log object that will be returned by omniGetStructuredLogWithoutAcquire().
    //!
    //! omniCoreStart will call acquire on the given structured log object.
    //!
    //! If the given parameter is nullptr, the default implementation object will be used instead.
    omni::structuredlog::IStructuredLog* structuredLog;

    //! When adding fields, decrement this reserved space. Be mindful of alignment (explicitly add padding fields if
    //! needed).
    void* reserved[12];

    //! Default constructor.
    //
    // NOTE(review): memset-zeroing *this is only valid while the struct stays
    // trivially copyable (enforced by CARB_ASSERT_INTEROP_SAFE below).
    OmniCoreStartArgs()
    {
        std::memset(this, 0, sizeof(*this));
        version = kOmniCoreStartArgsVersion;
        byteCount = sizeof(*this);
    }

    //! Constructor which accepts default implementations for the core.
    OmniCoreStartArgs(omni::core::ITypeFactory* factory_,
                      omni::log::ILog* log_ = nullptr,
                      omni::structuredlog::IStructuredLog* strucLog_ = nullptr)
        : OmniCoreStartArgs()
    {
        typeFactory = factory_;
        log = log_;
        structuredLog = strucLog_;
    }
};

CARB_ASSERT_INTEROP_SAFE(OmniCoreStartArgs);
// Locks the struct size so fields are only ever added by consuming `reserved`.
static_assert((8 + 15 * sizeof(void*)) == sizeof(OmniCoreStartArgs), "OmniCoreStartArgs has an unexpected size");

//! Initializes the omni core library's internal data structures.
//!
//! nullptr is accepted, in which case the default behavior described in OmniCoreStartArgs is applied.
//!
//! See @ref OMNI_CORE_INIT().
OMNI_API void omniCoreStart(const OmniCoreStartArgs* args);

//! Tears down the omni core library's internal data structures.
//!
//! See @ref OMNI_CORE_INIT().
OMNI_API void omniCoreStop();

//! Tears down the omni core library's internal data structures for script bindings.
//!
//! See @ref OMNI_CORE_INIT().
OMNI_API void omniCoreStopForBindings();

//! Releases the structured log pointer.
//!
//! This should be called before unloading plugins so that the structured log plugin properly shuts down.
OMNI_API void omniReleaseStructuredLog();

#include "../../carb/ClientUtils.h"
omniverse-code/kit/include/omni/core/Types.gen.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/OmniAttr.h> #include <omni/core/Interface.h> #include <omni/core/ResultError.h> #include <functional> #include <utility> #include <type_traits> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL static_assert(std::is_standard_layout<omni::core::UInt2>::value, "omni::core::UInt2 must be standard layout to be used in ONI ABI"); static_assert(std::is_standard_layout<omni::core::Int2>::value, "omni::core::Int2 must be standard layout to be used in ONI ABI"); static_assert(std::is_standard_layout<omni::core::Float2>::value, "omni::core::Float2 must be standard layout to be used in ONI ABI");
omniverse-code/kit/include/omni/usd/UsdTypes.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/kit/KitTypes.h>

#include <functional>

namespace omni
{
namespace usd
{

// Opaque handle identifying an event subscription.
typedef uint64_t SubscriptionId;

/**
 * Defines the USD state types.
 */
enum class StageState
{
    eClosed, ///! USD is closed/unopened.
    eOpening, ///! USD is opening.
    eOpened, ///! USD is opened.
    eClosing ///! USD is closing.
};

/**
 * Defines the usd event types.
 *
 * NOTE(review): enumerator values are implicit; inserting or reordering entries
 * changes the numeric value of every entry that follows. Presumably these
 * values cross a plugin/scripting boundary -- append new entries at the end.
 */
enum class StageEventType
{
    eSaved, ///! USD file saved.
    eSaveFailed, ///! Failed to save USD.
    eOpening, ///! USD stage is opening.
    eOpened, ///! USD stage is opened successfully.
    eOpenFailed, ///! USD stage failed to open.
    eClosing, ///! USD stage is about to close. This is a good opportunity to shutdown anything depends on USD stage.
    eClosed, ///! USD stage is fully closed.
    eSelectionChanged, ///! USD Prim selection has changed.
    eAssetsLoaded, ///! Current batch of async asset loading has been completed.
    eAssetsLoadAborted, ///! Current batch of async asset loading has been aborted.
    eGizmoTrackingChanged, ///! Started or stopped tracking (hovering) on a gizmo
    eMdlParamLoaded, ///! MDL parameter is loaded for a MDL UsdShadeShader.
    eSettingsLoaded, /// Stage settings have loaded
    eSettingsSaving, /// Stage settings are being saved
    eOmniGraphStartPlay, /// OmniGraph play has started
    eOmniGraphStopPlay, /// OmniGraph play has stopped
    eSimulationStartPlay, /// Simulation play has started
    eSimulationStopPlay, /// Simulation play has stopped
    eAnimationStartPlay, /// Animation playback has started
    eAnimationStopPlay, /// Animation playback has stopped
    eDirtyStateChanged, /// Dirty state of USD stage has changed. Dirty state means if it has unsaved changes or not.
    eAssetsLoading, ///! A new batch of async asset loading has started.
    eActiveLightsCountChanged, ///! Number of active lights in the scene has changed. This signal should be triggered
                               /// every time a scene has been loaded or number of lights has been changed. A few
                               /// features, for instance view lighting mode, need to detect when a number of active
                               /// lights becomes zero / become non-zero.
    eHierarchyChanged, ///! USD stage hierarchy has changed.
    eHydraGeoStreamingStarted, ///! Fabric Scene Delegate sends this when starting to stream rprims.
    eHydraGeoStreamingStopped, ///! Fabric Scene Delegate sends this when stopping to stream rprims.
    eHydraGeoStreamingStoppedNotEnoughMem, ///! Fabric Scene Delegate sends this when geometry streaming stops loading more geometry because of insufficient device memory
    eHydraGeoStreamingStoppedAtLimit, ///! Fabric Scene Delegate sends this when stopping to stream rprims because of the limit set by the user.
    eSaving, ///! Saving is in progress
};

/**
 * Rendering-related stage events. Same caveat as StageEventType: values are
 * implicit, so only append.
 */
enum class StageRenderingEventType
{
    eNewFrame, ///! New frame available for Viewport, params are ViewportHandle, FrameNo, RenderResults
    /// Frames complete for a single hydra engine render() invocation.
    /// Payload is {
    ///     render_results: [ { viewport_handle: ViewportHandle, product: HydraRenderProduct*, subframe_count: int32_t } ],
    ///     average_frame_time_ns: float,
    ///     swh_frame_number: uint64_t,
    /// }
    eHydraEngineFramesComplete,
    /// Frames added to the GPU queue for a single hydra engine.
    /// This event signifies that the frame is scheduled for GPU rendering and has not been rendered yet.
    /// The payload structure is the same as eHydraEngineFramesComplete.
    eHydraEngineFramesAdded,
};

// Callback invoked when a stage operation completes; `err` describes the failure when result is false.
using OnStageResultFn = std::function<void(bool result, const char* err)>;
// Callback invoked after saving layers; `savedLayers` lists the layers that were written.
using OnLayersSavedResultFn =
    std::function<void(bool result, const char* err, const std::vector<std::string>& savedLayers)>;
// Callback invoked when picking finishes; `worldPos` may describe the picked world position (pointer semantics
// suggest it can be null when nothing was hit -- TODO confirm against the caller).
using OnPickingCompleteFn = std::function<void(const char* path, const carb::Double3* worldPos)>;
}
}
omniverse-code/kit/include/omni/usd/UsdUtils.h
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// USD headers are intentionally not included here; the consumer must have
// pulled them in (typically via a pre-compiled header) before this header.
#ifndef USD_UTILS_INCLUDES
#    error "Please include UtilsIncludes.h before including this header or in pre-compiled header."
#endif

#include "PathUtils.h"

#include <omni/kit/SettingsUtils.h>

#include <carb/InterfaceUtils.h>
#include <carb/datasource/IDataSource.h>
#include <carb/extras/Path.h>
#include <carb/extras/StringUtils.h>
#include <carb/filesystem/IFileSystem.h>
#include <carb/logging/Log.h>
#include <carb/omniclient/OmniClientUtils.h>
#include <carb/profiler/Profile.h>
#include <carb/settings/ISettings.h>

#include <functional>
#include <regex>
#include <vector>

PXR_NAMESPACE_OPEN_SCOPE

// Quaternion overload of GfIsClose, injected into the pxr namespace so it
// participates in overload resolution with the scalar/vector versions.
// A quaternion and its negation are treated as close (result2), since unit
// quaternions q and -q represent the same rotation.
inline bool GfIsClose(const pxr::GfQuatd& val1, const pxr::GfQuatd& val2, double tolerance)
{
    bool result1 = pxr::GfIsClose(val1.GetReal(), val2.GetReal(), tolerance) &&
                   GfIsClose(val1.GetImaginary(), val2.GetImaginary(), tolerance);
    bool result2 = GfIsClose(val1.GetReal(), -val2.GetReal(), tolerance) &&
                   GfIsClose(val1.GetImaginary(), -val2.GetImaginary(), tolerance);
    return result1 || result2;
}

PXR_NAMESPACE_CLOSE_SCOPE

namespace omni
{
namespace usd
{

// Carbonite settings paths consulted by the utilities below.
static constexpr char kAuthorOldMdlSchemaSettingPath[] = "/omni.kit.plugin/authorOldMdlSchema";
static constexpr char kCreateXformForTypelessReferenceSettingPath[] =
    "/omni.kit.plugin/createXformForTypelessReference";
static constexpr char kCreateExplicitRefForNoneDefaultPrimSettingPath[] =
    "/omni.kit.plugin/createExplicitRefForNoneDefaultPrim";
static constexpr char kAuthorXformsWithFastUpdatesSettingPath[] = "/omni.kit.plugin/authorXformsWithFastUpdates";
// (declaration continues on the next chunk line)
static constexpr char
kDefaultRotationOrderSettingPath[] = PERSISTENT_SETTINGS_PREFIX "/app/primCreation/DefaultRotationOrder";
static constexpr char kDefaultXforOpTypeSettingPath[] = PERSISTENT_SETTINGS_PREFIX "/app/primCreation/DefaultXformOpType";

/**
 * Defines a helper class to perform various USD operations.
 *
 * Because different project might want to include USD headers differently (Kit uses pre-compiled header for USD
 * includes), UsdUtils.h doesn't include any USD headers. Users are responsible for including the correct headers.
 */
class UsdUtils
{
public:
    /**
     * Defines a helper class to do scoped layer editing. It sets scoped edit target and layer editing permission.
     *
     * The layer is forced editable for the lifetime of this object; the previous
     * permission is restored in the destructor.
     */
    class ScopedLayerEdit
    {
    public:
        ScopedLayerEdit(pxr::UsdStageWeakPtr stage, const pxr::SdfLayerHandle& layer)
            : m_usdEditCtx(stage, layer), m_layer(layer)
        {
            m_wasLayerEditable = m_layer->PermissionToEdit();
            m_layer->SetPermissionToEdit(true);
            CARB_ASSERT(m_layer->PermissionToEdit());
        }

        ~ScopedLayerEdit()
        {
            m_layer->SetPermissionToEdit(m_wasLayerEditable);
        }

    private:
        pxr::UsdEditContext m_usdEditCtx; // restores the previous edit target on destruction
        pxr::SdfLayerHandle m_layer;
        bool m_wasLayerEditable; // permission state captured at construction
    };

    /**
     * Helper base class to subscribe to pxr::TfNotice
     *
     * @tparam T The notice type to listen for; subclasses implement handleNotice().
     */
    template <typename T>
    class UsdNoticeListener : public pxr::TfWeakBase
    {
    public:
        virtual ~UsdNoticeListener()
        {
            revokeListener();
        }

        void registerListener()
        {
            // To avoid leak: drop any previous registration before re-registering.
            revokeListener();
            m_usdNoticeListenerKey =
                pxr::TfNotice::Register(pxr::TfCreateWeakPtr(this), &UsdNoticeListener::handleNotice);
        }

        void revokeListener()
        {
            if (m_usdNoticeListenerKey.IsValid())
            {
                pxr::TfNotice::Revoke(m_usdNoticeListenerKey);
            }
        }

        virtual void handleNotice(const T& objectsChanged) = 0;

    private:
        pxr::TfNotice::Key m_usdNoticeListenerKey;
    };

    // Factory callback used by prim-creation helpers.
    using OnCreateFn = std::function<pxr::UsdPrim(pxr::UsdStageWeakPtr stage, const pxr::SdfPath& path)>;

    /**
     * Checks if a UsdGeomXformable instance is time sampled.
     *
     * @param xform The UsdGeomXformable to be checked.
     * @return True if the xform is timesampled.
     */
    static bool isTimeSampled(const pxr::UsdGeomXformable& xform)
    {
        // Iterate every xformOps check if any of them is timesampled.
        // Don't call xform.GetTimeSamples() since this function will perform a very low efficient timesamples union for
        // all xformOps.
        // Also xform.TransformsMightBeTimeVarying() is not fit here since it will return true only one of
        // the xformOp's numTimesamples > 1.
        bool resetXformStack = false;
        auto xformOps = xform.GetOrderedXformOps(&resetXformStack);
        for (auto xformOp : xformOps)
        {
            if (xformOp.GetNumTimeSamples() > 0)
            {
                return true;
            }
        }
        return false;
    }

    /**
     * Checks if a UsdGeomXformable has timeSample on Key time
     * TODO: currently imgizmo can only handle matrix, which brings that we need to set translate/rotation/scale have
     * same key times But this is not correct, need to be fixed.
     * @param xform The UsdGeomXformable to be checked.
     * @param timeCode The timeCode to be checked on xform.
     * @return True if the xform is timesampled on key timeCode.
     */
    static bool hasTimeSample(const pxr::UsdGeomXformable& xform, pxr::UsdTimeCode timeCode)
    {
        if (timeCode.IsDefault())
            return false;
        bool resetXformStack = false;
        auto xformOps = xform.GetOrderedXformOps(&resetXformStack);
        for (auto xformOp : xformOps)
        {
            // Delegates to the UsdAttribute overload for each op.
            if (hasTimeSample(xformOp.GetAttr(), timeCode))
            {
                return true;
            }
        }
        return false;
    }

    /**
     * Checks if a UsdAttribute instance is time sampled.
     *
     * @param attribute The UsdAttribute to be checked.
     * @return True if the attribute is timesampled.
     */
    static bool isTimeSampled(const pxr::UsdAttribute& attribute)
    {
        return attribute.GetNumTimeSamples() > 0;
    }

    /**
     * Checks if a UsdAttribute instance has time sample on key timeCode
     *
     * @param attribute The UsdAttribute to be checked.
     * @param timeCode The timeCode to be checked.
     * @return True if the attribute has timesampled on key timeCode.
*/ static bool hasTimeSample(const pxr::UsdAttribute& attribute, pxr::UsdTimeCode timeCode) { if (timeCode.IsDefault()) return false; std::vector<double> times; if (attribute.GetTimeSamples(&times)) { double timeCodeValue = timeCode.GetValue(); if (round(timeCodeValue) != timeCode.GetValue()) { CARB_LOG_WARN("Error : Try to identify attribute %s has time sample on a fractinal key frame %f", attribute.GetPath().GetText(), timeCodeValue); return false; } return std::find(times.begin(), times.end(), timeCodeValue) != times.end(); } return false; } /** * Gets current UsdTimeCode of given stage. * * @param stage The stage to get time code from. * @param isTimeSampled If the property is timesampled. * @param time Current timecode. * @return Current timecode of the stage. */ static pxr::UsdTimeCode getUsdTimeCode(pxr::UsdStageWeakPtr stage, bool isTimeSampled, pxr::UsdTimeCode time) { return isTimeSampled ? time : pxr::UsdTimeCode::Default(); } /** * Removes a prim from stage. * * @param prim The prim instance to be removed. * @return True if prim is removed successfully. 
*/ static bool removePrim(pxr::UsdPrim& prim) { auto stage = prim.GetStage(); bool ret = false; if (checkAncestral(prim)) { CARB_LOG_ERROR("Cannot remove ancestral prim %s", prim.GetPath().GetText()); return false; } pxr::SdfChangeBlock changeBlock; if (stage->HasDefaultPrim() && stage->GetDefaultPrim() == prim) { stage->ClearDefaultPrim(); } auto layerStack = stage->GetLayerStack(); std::set<pxr::SdfLayerHandle> layerSet(layerStack.begin(), layerStack.end()); auto primStack = prim.GetPrimStack(); for (auto&& primSpec : primStack) { auto layer = primSpec->GetLayer(); // Only remove from layers in the stage if (layerSet.find(layer) == layerSet.end() || primSpec->GetPath() != prim.GetPath()) { continue; } pxr::SdfBatchNamespaceEdit nsEdits; nsEdits.Add(pxr::SdfNamespaceEdit::Remove(primSpec->GetPath())); ret |= layer->Apply(nsEdits); } return ret; } /** * Removes a prim from layer * * @param prim The prim instance to be removed. * @param layer Layer from which to remove prim * @return True if prim is removed successfully. */ static bool removePrimFromLayer(pxr::UsdPrim& prim, pxr::SdfLayerHandle layer) { auto stage = prim.GetStage(); if (checkAncestral(prim)) { CARB_LOG_ERROR("Cannot remove ancestral prim %s", prim.GetPath().GetText()); return false; } pxr::SdfChangeBlock changeBlock; if (stage->HasDefaultPrim() && stage->GetDefaultPrim() == prim) { stage->ClearDefaultPrim(); } auto primSpec = layer->GetPrimAtPath(prim.GetPath()); auto parent = primSpec->GetNameParent(); if (!parent) { parent = layer->GetPseudoRoot(); } return parent->RemoveNameChild(primSpec); } /** * Copies a prim in the stage. * * @param prim The prim to be copied. * @param tarPath The target path of copied prim, or nullptr if you want a auto generated path (e.g. "foo" -> * "foo_2"). * @param defOrRefOnly True to only copy the layer that defines or references the Prim. False copy deltas on all * layers. @p defOrRefOnly has no effect if @p combineAllLayers is true. 
* @param combineAllLayers True to combine deltas on all layers and make a consolidated copy on current edit layer. * False to copy each delta on their own layer. * @return The copied prim. Call IsValid on the return value to check if copy is successful. */ static PXR_NS::UsdPrim copyPrim(PXR_NS::UsdPrim& prim, const char* tarPath, bool defOrRefOnly, bool combineAllLayers) { PXR_NS::UsdPrim ret; auto stage = prim.GetStage(); std::string tarPathStr; if (tarPath) { tarPathStr = tarPath; } else { tarPathStr = findNextNoneExisitingNodePath(stage, prim.GetPath().GetString(), false); } PXR_NS::SdfPath srcPath(prim.GetPath()); PXR_NS::SdfPath dstPath(tarPathStr); if (!combineAllLayers) { PXR_NS::SdfChangeBlock changeBlock; auto layers = prim.GetStage()->GetLayerStack(); for (const auto& layer : layers) { auto oldPrimSpec = layer->GetPrimAtPath(srcPath); if (oldPrimSpec) { PXR_NS::SdfLayerHandle dstLayer = layer; if (!defOrRefOnly) { dstLayer = stage->GetEditTarget().GetLayer(); } if (defOrRefOnly || (!defOrRefOnly && oldPrimSpec->HasReferences()) || oldPrimSpec->GetSpecifier() == PXR_NS::SdfSpecifier::SdfSpecifierDef) { PXR_NS::SdfCreatePrimInLayer(dstLayer, dstPath); PXR_NS::SdfCopySpec(layer, srcPath, dstLayer, dstPath); if (!defOrRefOnly) { break; } } } } } else // Combine all prim spec from all visible layers and copy to current edit target { auto editTargetLayerHandle = stage->GetEditTarget().GetLayer(); // Make a temporary stage to hold the Prim to copy, and flatten it later auto flattenStage = PXR_NS::UsdStage::CreateInMemory(); PXR_NS::SdfPrimSpecHandleVector primSpecs; // If the prim is introduced by its ancestor, its primSpec might now exist in current stage (if no "over" is // made to it) we need to copy from its primStack if (checkAncestral(prim)) { primSpecs = prim.GetPrimStack(); } else { for (auto& layer : prim.GetStage()->GetLayerStack()) { auto primSpec = layer->GetPrimAtPath(prim.GetPath()); if (primSpec) { primSpecs.push_back(primSpec); } } } for (const 
auto& primSpec : primSpecs) { auto srcLayer = primSpec->GetLayer(); PXR_NS::SdfLayerRefPtr dstLayer = nullptr; if (srcLayer->IsAnonymous()) { // If src layer is Anonymous, all relative path has been converted to absolute path. dstLayer = PXR_NS::SdfLayer::CreateAnonymous(); } else { // If src layer is not Anonymous, we need to create a layer at the same parent dir so // relative path can be resolved. carb::extras::Path layerPath(srcLayer->GetRealPath()); size_t counter = 0; while (!dstLayer) { std::string tempFileName = layerPath.getParent() / layerPath.getStem() + std::to_string(counter++) + layerPath.getExtension(); // Make sure we create a none-exist temp layer if (PXR_NS::SdfLayer::FindOrOpen(tempFileName)) { continue; } auto format = PXR_NS::SdfFileFormat::FindByExtension(layerPath.getExtension()); // Use PXR_NS::SdfLayer::New instead of PXR_NS::SdfLayer::CreateNew so no file is created dstLayer = PXR_NS::SdfLayer::New(format, tempFileName); } } PXR_NS::SdfCreatePrimInLayer(dstLayer, srcPath); PXR_NS::SdfCopySpec(srcLayer, primSpec->GetPath(), dstLayer, srcPath); flattenStage->GetRootLayer()->InsertSubLayerPath(dstLayer->GetIdentifier()); } auto flattenLayer = flattenStage->Flatten(); PXR_NS::SdfCopySpec(flattenLayer, srcPath, editTargetLayerHandle, dstPath); } prim = stage->GetPrimAtPath(dstPath); return prim; } /** * Resolve all prim path reference to use new path. This is mainly used to remapping * prim path reference after structure change of original prim. * @param layer Layer to resolve. * @param oldPath Old prim path. * @param newPath New prim path that all old prim path references will be resolved to. 
*/ static void resolvePrimPathReferences(const PXR_NS::SdfLayerRefPtr& layer, const PXR_NS::SdfPath& oldPath, const PXR_NS::SdfPath& newPath) { static auto updatePrimPathRef = [](const PXR_NS::SdfPrimSpecHandle& primSpec, const PXR_NS::SdfPath& oldPath, const PXR_NS::SdfPath& newPath) { auto modifyItemEditsCallback = [&oldPath, &newPath](const PXR_NS::SdfPath& path) { return path.ReplacePrefix(oldPath, newPath); }; auto modifyItemReferencesCallback = [&oldPath, &newPath](const PXR_NS::SdfReference& reference) { PXR_NS::SdfPath primPath; if (reference.GetAssetPath().empty()) { primPath = reference.GetPrimPath().ReplacePrefix(oldPath, newPath); } else { primPath = reference.GetPrimPath(); } return PXR_NS::SdfReference( reference.GetAssetPath(), primPath, reference.GetLayerOffset(), reference.GetCustomData() ); }; // Update relationships for (const auto& relationship : primSpec->GetRelationships()) { relationship->GetTargetPathList().ModifyItemEdits(modifyItemEditsCallback); } // Update connections for (const auto& attribute : primSpec->GetAttributes()) { attribute->GetConnectionPathList().ModifyItemEdits(modifyItemEditsCallback); } primSpec->GetReferenceList().ModifyItemEdits(modifyItemReferencesCallback); }; auto onPrimSpecPath = [&layer, &oldPath, &newPath](const PXR_NS::SdfPath& primPath) { if (primPath.IsPropertyPath() || primPath == PXR_NS::SdfPath::AbsoluteRootPath()) { return; } auto primSpec = layer->GetPrimAtPath(primPath); if (primSpec) { updatePrimPathRef(primSpec, oldPath, newPath); } }; layer->Traverse(PXR_NS::SdfPath::AbsoluteRootPath(), onPrimSpecPath); } /** * Moves a prim to a new path. * * @param prim The prim to be moved. * @param newPath The new path to move prim to. * @param supportNamespaceEdit false if (omni::usd::UsdContext::getContext()->isStageLive() && * strncmp(realPath.c_str(), "omniverse:", 10) == 0 && pxr::SdfFileFormat::GetFileExtension(realPath) != "usdov") * @return True if Prim is moved successfully. 
     */
    static bool movePrim(pxr::UsdPrim& prim, const char* newPath, bool supportNamespaceEdit)
    {
        if (!prim)
        {
            return false;
        }
        // Prims introduced through an ancestor's composition arc cannot be namespace-edited here.
        if (checkAncestral(prim))
        {
            // NOTE(review): message says "remove" but this is a move; kept byte-identical.
            CARB_LOG_ERROR("Cannot remove ancestral prim %s", prim.GetPath().GetText());
            return false;
        }
        auto stage = prim.GetStage();
        const char* oldPath = prim.GetPath().GetText();
        // If src and target path are the same, don't move.
        if (strcmp(newPath, oldPath) == 0)
        {
            return false;
        }
        // Remember whether this prim was the stage's default prim so that state can be restored after the move.
        bool wasDefaultPrim = stage->GetDefaultPrim() == prim;
        // Find a destination path that does not already exist on the stage (helper defined elsewhere in this file).
        auto newPathStr = findNextNoneExisitingNodePath(prim.GetStage(), newPath, false);
        if (!pxr::SdfPath::IsValidPathString(newPathStr))
        {
            return false;
        }
        pxr::SdfPath newSdfPath(newPathStr);
        pxr::TfToken newName = newSdfPath.GetNameToken();
        pxr::SdfPath newParentPath = newSdfPath.GetParentPath();
        pxr::SdfPath oldSdfPath = prim.GetPath();
        pxr::TfToken oldName = prim.GetName();
        pxr::SdfPath oldParentPath = prim.GetParent().GetPath();
        auto layerStack = stage->GetLayerStack();
        std::set<pxr::SdfLayerHandle> layerSet(layerStack.begin(), layerStack.end());
        // The prim may have opinions in several layers; each spec is edited individually.
        auto primStack = prim.GetPrimStack();
        for (auto&& primSpec : primStack)
        {
            bool useNamespaceEdit = true;
            auto layer = primSpec->GetLayer();
            // Only remove from layers in the stage
            if (layerSet.find(layer) == layerSet.end())
            {
                continue;
            }
            if (!supportNamespaceEdit)
            {
                CARB_LOG_WARN(
                    "Current USD format doesn't support NamespaceEdit to move/rename prim, fallback to copy/delete");
                useNamespaceEdit = false;
            }
            if (useNamespaceEdit)
            {
                pxr::SdfBatchNamespaceEdit nsEdits;
                if (oldParentPath == newParentPath)
                {
                    // Same parent: a plain rename is sufficient.
                    CARB_LOG_INFO("Rename %s to %s", oldSdfPath.GetText(),
                                  (oldSdfPath.GetParentPath().AppendChild(newName)).GetText());
                    nsEdits.Add(pxr::SdfNamespaceEdit::Rename(oldSdfPath, newName));
                }
                else
                {
                    // SdfNamespaceEdit::Reparent somehow doesn't work for move without renaming
                    CARB_LOG_INFO("Move %s to %s", oldSdfPath.GetText(), (newParentPath.AppendChild(newName)).GetText());
                    nsEdits.Add(pxr::SdfNamespaceEdit::ReparentAndRename(
                        primSpec->GetPath(), newParentPath, newName, pxr::SdfNamespaceEdit::AtEnd));
                }
                layer->Apply(nsEdits);
            }
            else
            {
                // Fallback: copy the spec to the new location, then remove the original.
                if (pxr::SdfCreatePrimInLayer(layer, newSdfPath))
                {
                    if (pxr::SdfCopySpec(layer, oldSdfPath, layer, newSdfPath))
                    {
                        pxr::SdfBatchNamespaceEdit nsEdits;
                        nsEdits.Add(pxr::SdfNamespaceEdit::Remove(oldSdfPath));
                        layer->Apply(nsEdits);
                    }
                }
            }
            auto newPrim = layer->GetPrimAtPath(newSdfPath);
            if (newPrim)
            {
                // Fixup connections and relationships
                resolvePrimPathReferences(layer, prim.GetPrimPath(), newSdfPath);
            }
        }
        auto newPrim = stage->GetPrimAtPath(newSdfPath);
        if (newPrim)
        {
            // Restore defaultPrim state
            if (wasDefaultPrim && newPrim.GetParent() == stage->GetPseudoRoot())
            {
                stage->SetDefaultPrim(newPrim);
            }
        }
        return newPrim.IsValid();
    }

    /**
     * Creates a prim on given stage.
     *
     * @param stage The stage to create prim on.
     * @param path The path to create prim at.
     * @param onCreateFn The creating function for the prim.
     * @param prependDefaultPrimPath Whether to prepend defaultPrim path to Prim path. True to put the prim under
     * defaultPrim path.
     * @return The created prim. Call IsValid on the return value to check if creation is successful.
     */
    static pxr::UsdPrim createPrim(pxr::UsdStageWeakPtr stage,
                                   const char* path,
                                   OnCreateFn onCreateFn,
                                   bool prependDefaultPrimPath = true)
    {
        // Resolve a non-colliding path first, then delegate the actual authoring to the callback.
        std::string newPath = findNextNoneExisitingNodePath(stage, path, prependDefaultPrimPath);
        return onCreateFn(stage, pxr::SdfPath(newPath));
    }

    /**
     * Creates a prim on given stage.
     *
     * @param stage The stage to create prim on.
     * @param path The path to create prim at.
     * @param typeName The type name of the prim.
     * @param prependDefaultPrimPath Whether to prepend defaultPrim path to Prim path. True to put the prim under
     * defaultPrim path.
     * @return The created prim. Call IsValid on the return value to check if creation is successful.
*/ static pxr::UsdPrim createPrim(pxr::UsdStageWeakPtr stage, const char* path, const char* typeName, bool prependDefaultPrimPath = true) { std::string newPath = findNextNoneExisitingNodePath(stage, path, prependDefaultPrimPath); return stage->DefinePrim(pxr::SdfPath(newPath), pxr::TfToken(typeName)); } /** * Returns if a prim exists at given path. * * @param stage The stage to check prim existence. * @param path The prim path * @param prependDefaultPrimPath Whether to check under defaultPrim path or stage root. */ static bool hasPrimAtPath(pxr::UsdStageWeakPtr stage, std::string path, bool prependDefaultPrimPath = true) { if (prependDefaultPrimPath && stage->HasDefaultPrim()) { path = stage->GetDefaultPrim().GetPath().GetString() + path; } return stage->GetPrimAtPath(pxr::SdfPath(path)).IsValid(); } /** * Gets if a prim is visible. * * @param prim The prim to get visibility from. * @param prim The visibility of the prim's parent. * @param time Current timecode. * @return True if prim is visible. */ static bool isPrimVisible(const pxr::UsdPrim& prim, pxr::UsdTimeCode time = pxr::UsdTimeCode::Default()) { pxr::UsdGeomImageable imageable(prim); auto visibilityAttr = imageable.GetVisibilityAttr(); auto visibility = imageable.ComputeVisibility(time); return visibility != pxr::UsdGeomTokens->invisible; } /** * Sets the visibility on a prim. * * @param prim The prim to set visibility. * @param visible True to make a prim visible. False to hide it. * @param time Current timecode. */ static void setPrimVisibility(pxr::UsdPrim prim, bool visible, pxr::UsdTimeCode time = pxr::UsdTimeCode::Default()) { pxr::UsdGeomImageable imageable(prim); auto visibilityAttr = imageable.GetVisibilityAttr(); visibilityAttr.Set(visible ? pxr::UsdGeomTokens->inherited : pxr::UsdGeomTokens->invisible, time); } /** * Gets if prim have transforms at key time_code. * * @param The prim to check transform sequence. * @param time Current timecode. 
     * @return if prim has transform at time_code
     */
    // TODO : Need more tweak on each kinds of transformop after timesample authoring support all kinds of transformop
    static bool getPrimHasTransformAtKey(const pxr::UsdPrim& prim,
                                         pxr::UsdTimeCode time_code = pxr::UsdTimeCode::Default())
    {
        pxr::UsdGeomXformable xform(prim);
        bool resetXFormStack;
        auto xformOps = xform.GetOrderedXformOps(&resetXFormStack);
        // The Default timecode can never match an authored time sample.
        if (time_code.IsDefault())
        {
            return false;
        }
        // True if any xformOp carries a time sample exactly at time_code.
        for (auto xformOp : xformOps)
        {
            std::vector<double> times;
            if (xformOp.GetTimeSamples(&times))
            {
                if (std::find(times.begin(), times.end(), time_code.GetValue()) != times.end())
                {
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Gets local transform matrix of a prim.
     *
     * @param prim The prim to get local transform matrix from.
     * @param time Current timecode.
     * @return The local transform matrix.
     */
    static pxr::GfMatrix4d getLocalTransformMatrix(const pxr::UsdPrim& prim,
                                                   pxr::UsdTimeCode time = pxr::UsdTimeCode::Default())
    {
        bool resetXformStack = false;
        pxr::UsdGeomXformable xform(prim);
        pxr::GfMatrix4d mat;
        xform.GetLocalTransformation(&mat, &resetXformStack, time);
        return mat;
    }

    /**
     * Gets GfRotation from XformOp that is UsdGeomXformOp::TypeOrient.
     * It is caller's responsibility to pass the xformOp with correct type.
     *
     * @param xformOp The UsdGeomXformOp to get rotation from. It must be of type UsdGeomXformOp::TypeOrient.
     * @param time The timecode to get value from.
     * @return GfRotation of the xformOp. The value is set to identity if failed to fetch from xformOp
     *         (mismatched type or undefined value).
     */
    template <typename QuatT>
    static PXR_NS::GfRotation getRotationFromXformOpOrient(const PXR_NS::UsdGeomXformOp& xformOp,
                                                           const PXR_NS::UsdTimeCode& time)
    {
        QuatT quat;
        if (xformOp.Get<QuatT>(&quat, time))
        {
            return PXR_NS::GfRotation(quat);
        }
        // Zero degrees about X == identity rotation.
        return PXR_NS::GfRotation({ 1, 0, 0 }, 0);
    }

    /**
     * Gets local transform if applied in scale, rotation, translation order.
     *
     * Depending on the xformOpOrder of the prim, the returned value may not be identical to the final local
     * transform. Only use this function on simple SRT xformOpOrder or matrix.
     *
     * @param prim The prim to get local transform matrix from.
     * @param translation Translation to be written to.
     * @param rotation Rotation euler angles (in degrees) to be written to.
     * @param rotationOrder Rotation order to be written to.
     * @param scale Scale to be written to.
     * @param time Current timecode.
     * @return true if operation succeed.
     */
    static bool getLocalTransformSRT(const PXR_NS::UsdPrim& prim,
                                     PXR_NS::GfVec3d& translation,
                                     PXR_NS::GfVec3d& rotation,
                                     PXR_NS::GfVec3i& rotationOrder,
                                     PXR_NS::GfVec3d& scale,
                                     PXR_NS::UsdTimeCode time = pxr::UsdTimeCode::Default())
    {
        using namespace PXR_NS;
        bool resetXformStack = false;
        pxr::UsdGeomXformable xform(prim);
        std::vector<UsdGeomXformOp> extraXformOps;
        std::vector<UsdGeomXformOp> orderedXformOps = xform.GetOrderedXformOps(&resetXformStack);
        bool seenScale = false;
        uint32_t seenRotation = 0; // use a counter here, because euler angle can show up as individual xformOp.
        bool seenAxes[3] = { false, false, false };
        bool seenTranslation = false;

        // default values
        scale.Set(1.0, 1.0, 1.0);
        rotation.Set(0.0, 0.0, 0.0);
        rotationOrder.Set(-1, -1, -1); // placeholder
        translation.Set(0.0, 0.0, 0.0);

        // Walk the op stack back-to-front, so for an S-R-T order scale is seen first,
        // then rotation, then translation.
        for (auto it = orderedXformOps.rbegin(); it != orderedXformOps.rend(); ++it)
        {
            const UsdGeomXformOp& xformOp = *it;

            if (xformOp.IsInverseOp())
            {
                continue;
            }

            // A.B.temp solution to unblock a showstopper for supporting unitsResolve suffix xformOp stacks
            // for preop we need still full matrix computation for post op we can reconstruct at the end
            static const TfToken kUnitsResolve = TfToken("unitsResolve");
            if (xformOp.HasSuffix(kUnitsResolve))
            {
                if (xformOp == orderedXformOps[0])
                {
                    // unitsResolve op leads the stack: fall back to full matrix decomposition.
                    GfMatrix4d mtx;
                    bool resetXformStack;
                    xform.GetLocalTransformation(&mtx, &resetXformStack, time);
                    pxr::GfMatrix4d rotMat(1.0);
                    pxr::GfMatrix4d scaleOrientMatUnused, perspMatUnused;
                    mtx.Factor(&scaleOrientMatUnused, &scale, &rotMat, &translation, &perspMatUnused);
                    // By default decompose as XYZ order (make it an option?)
                    GfVec3d decompRot =
                        rotMat.ExtractRotation().Decompose(GfVec3d::ZAxis(), GfVec3d::YAxis(), GfVec3d::XAxis());
                    rotation = { decompRot[2], decompRot[1], decompRot[0] };
                    rotationOrder = { 0, 1, 2 };
                    return true;
                }
                else
                {
                    // Non-leading unitsResolve ops are folded into the result after the loop.
                    extraXformOps.push_back(xformOp);
                }
                continue;
            }

            const UsdGeomXformOp::Type opType = xformOp.GetOpType();
            const UsdGeomXformOp::Precision precision = xformOp.GetPrecision();
            if (opType == UsdGeomXformOp::TypeTransform)
            {
                // A full matrix op subsumes scale, rotation and translation: decompose and stop scanning.
                seenScale = true;
                seenRotation = 3;
                seenTranslation = true;
                GfMatrix4d mtx = xformOp.GetOpTransform(time);
                pxr::GfMatrix4d rotMat(1.0);
                pxr::GfMatrix4d scaleOrientMatUnused, perspMatUnused;
                mtx.Factor(&scaleOrientMatUnused, &scale, &rotMat, &translation, &perspMatUnused);
                // By default decompose as XYZ order (make it an option?)
                GfVec3d decompRot =
                    rotMat.ExtractRotation().Decompose(GfVec3d::ZAxis(), GfVec3d::YAxis(), GfVec3d::XAxis());
                rotation = { decompRot[2], decompRot[1], decompRot[0] };
                rotationOrder = { 0, 1, 2 };
                break;
            }
            if (!seenScale)
            {
                if (opType == UsdGeomXformOp::TypeScale)
                {
                    if (seenRotation || seenTranslation)
                    {
                        CARB_LOG_WARN("Incompatible xformOpOrder, rotation or translation applied before scale.");
                    }
                    seenScale = true;
                    xformOp.GetAs<>(&scale, time);
                }
            }
            if (seenRotation != 3)
            {
                if (opType >= UsdGeomXformOp::TypeRotateXYZ && opType <= UsdGeomXformOp::TypeRotateZYX)
                {
                    // Triple-axis euler op: provides all three angles at once.
                    if (seenTranslation || seenRotation != 0)
                    {
                        CARB_LOG_WARN(
                            "Incompatible xformOpOrder, translation applied before rotation or too many rotation ops.");
                    }
                    seenRotation = 3;
                    xformOp.GetAs<>(&rotation, time);
                    static const GfVec3i kRotationOrder[] = {
                        { 0, 1, 2 }, // XYZ
                        { 0, 2, 1 }, // XZY
                        { 1, 0, 2 }, // YXZ
                        { 1, 2, 0 }, // YZX
                        { 2, 0, 1 }, // ZXY
                        { 2, 1, 0 }, // ZYX
                    };
                    rotationOrder = kRotationOrder[opType - UsdGeomXformOp::TypeRotateXYZ];
                }
                else if (opType >= UsdGeomXformOp::TypeRotateX && opType <= UsdGeomXformOp::TypeRotateZ)
                {
                    // Single-axis rotation: accumulate per-axis angles and remember the order encountered.
                    if (seenTranslation || seenRotation > 3)
                    {
                        CARB_LOG_WARN("Incompatible xformOpOrder, too many single axis rotation ops.");
                    }
                    // Set rotation order based on individual axis order
                    rotationOrder[seenRotation++] = opType - UsdGeomXformOp::TypeRotateX;
                    seenAxes[opType - UsdGeomXformOp::TypeRotateX] = true;
                    double angle = 0.0;
                    xformOp.GetAs<>(&angle, time);
                    rotation[opType - UsdGeomXformOp::TypeRotateX] = angle;
                }
                else if (opType == UsdGeomXformOp::TypeOrient)
                {
                    if (seenTranslation || seenRotation != 0)
                    {
                        CARB_LOG_WARN(
                            "Incompatible xformOpOrder, translation applied before rotation or too many rotation ops.");
                    }
                    seenRotation = 3;
                    GfRotation rot;
                    // GetAs cannot convert between Quath, Quatf and Quatd
                    switch (precision)
                    {
                        case PXR_NS::UsdGeomXformOp::PrecisionHalf:
                        {
                            rot = getRotationFromXformOpOrient<GfQuath>(xformOp, time);
                            break;
                        }
                        case PXR_NS::UsdGeomXformOp::PrecisionFloat:
                        {
                            rot = getRotationFromXformOpOrient<GfQuatf>(xformOp, time);
                            break;
                        }
                        case PXR_NS::UsdGeomXformOp::PrecisionDouble:
                        {
                            rot = getRotationFromXformOpOrient<GfQuatd>(xformOp, time);
                            break;
                        }
                        default:
                            break;
                    }
                    // By default decompose as XYZ order (make it an option?)
                    GfVec3d decompRot = rot.Decompose(GfVec3d::ZAxis(), GfVec3d::YAxis(), GfVec3d::XAxis());
                    rotation = { decompRot[2], decompRot[1], decompRot[0] };
                    rotationOrder = { 0, 1, 2 };
                }
            }
            if (!seenTranslation)
            {
                // Do not get translation from pivot
                if (opType == UsdGeomXformOp::TypeTranslate && !xformOp.HasSuffix(PXR_NS::TfToken("pivot")))
                {
                    seenTranslation = true;
                    xformOp.GetAs<>(&translation, time);
                }
            }
        }

        if (seenRotation == 0)
        {
            // If we did not see any rotation op, get it from the preferences
            static const std::unordered_map<std::string, PXR_NS::GfVec3i> s_orderMap{
                { "XYZ", { 0, 1, 2 } }, { "XZY", { 0, 2, 1 } }, { "YXZ", { 1, 0, 2 } },
                { "YZX", { 1, 2, 0 } }, { "ZXY", { 2, 0, 1 } }, { "ZYX", { 2, 1, 0 } },
            };
            PXR_NS::GfVec3i preferredDefaultRotationOrder = { 0, 1, 2 }; // fallback
            const char* orderStr = carb::getCachedInterface<carb::settings::ISettings>()->getStringBuffer(
                kDefaultRotationOrderSettingPath);
            const auto orderEntry = s_orderMap.find(orderStr);
            if (orderEntry != s_orderMap.end())
            {
                preferredDefaultRotationOrder = orderEntry->second;
            }
            rotationOrder = preferredDefaultRotationOrder;
            seenRotation = 3;
        }
        else
        {
            // Assign rotation order to missing rotation ops after existing rotation ops
            for (size_t i = 0; i < 3; i++)
            {
                int32_t& order = rotationOrder[i];
                if (order == -1)
                {
                    for (int32_t j = 0; j < 3; j++)
                    {
                        if (!seenAxes[j])
                        {
                            order = j;
                            seenAxes[j] = true;
                            break;
                        }
                    }
                }
            }
        }

        // A.B.
        // this is a known transformation, we have a rotateX + -90 and scale
        // we can just add the X euler rotation and swap the scale Y, Z axis, this should represent the additional
        // transformation
        if (!extraXformOps.empty())
        {
            for (const UsdGeomXformOp& extraOp : extraXformOps)
            {
                if (extraOp.GetOpType() == UsdGeomXformOp::Type::TypeScale)
                {
                    PXR_NS::GfVec3d scaleValue;
                    extraOp.GetAs<>(&scaleValue, time);
                    scale = PXR_NS::GfCompMult(scale, scaleValue);
                }
                else if (extraOp.GetOpType() == UsdGeomXformOp::Type::TypeRotateX)
                {
                    double rotValue;
                    extraOp.GetAs<>(&rotValue, time);
                    if (PXR_NS::GfIsClose(abs(rotValue), 90.0, 0.01))
                    {
                        rotation[0] = rotation[0] + rotValue;
                        std::swap(scale[1], scale[2]);
                    }
                    else
                    {
                        CARB_LOG_WARN("UnitsResolve rotateX supports only +-90 degree on prim: %s",
                                      prim.GetPrimPath().GetText());
                    }
                }
            }
        }
        return true;
    }

    /**
     * Gets the inversed pivot transform matrix of a prim.
     *
     * @param prim The prim to get inversed pivot transform matrix from.
     * @param time Current timecode.
     * @return The inversed pivot transform matrix. If prim has no pivot, identity matrix will be returned.
     */
    static pxr::GfMatrix4d getLocalTransformPivotInv(const pxr::UsdPrim& prim,
                                                     pxr::UsdTimeCode time = pxr::UsdTimeCode::Default())
    {
        pxr::UsdGeomXformable xform(prim);
        bool resetXFormStack;
        auto xformOps = xform.GetOrderedXformOps(&resetXFormStack);
        // potential pivot inv node — by convention the inverse pivot is the last op in the stack.
        if (xformOps.size())
        {
            auto pivotOpInv = xformOps.back();
            if (pivotOpInv.GetOpType() == pxr::UsdGeomXformOp::Type::TypeTranslate &&
                pivotOpInv.HasSuffix(pxr::TfToken("pivot")) && pivotOpInv.IsInverseOp())
            {
                return pivotOpInv.GetOpTransform(time);
            }
        }
        // return an identity matrix if no pivot is found
        return pxr::GfMatrix4d(1.f);
    }

    /**
     * Gets world transform matrix of a prim.
     *
     * @param prim The prim to get world transform matrix from.
     * @param time Current timecode.
     * @return The world transform matrix.
     */
    static pxr::GfMatrix4d getWorldTransformMatrix(const pxr::UsdPrim& prim,
                                                   pxr::UsdTimeCode time = pxr::UsdTimeCode::Default())
    {
        pxr::UsdGeomXformable xform(prim);
        return xform.ComputeLocalToWorldTransform(time);
    }

    /**
     * Given a target local transform matrix for a prim, determine what value to set just
     * the transformOp when other xformOps are present.
     *
     * @param prim The prim in question
     * @param mtx The desired final transform matrix for the prim including all ops
     * @param foundTransformOp returns true if there is a transform xformOp
     * @param timecode The timecode at which the other ops are sampled.
     * @param skipEqualSetForTimeSample NOTE(review): unused in this function; kept for signature
     *        parity with the setters.
     * @return The matrix to author on the transform op, or @p mtx unchanged when there is no
     *         transform op or no other ops to compensate for.
     */
    static pxr::GfMatrix4d findInnerTransform(pxr::UsdPrim prim,
                                              const pxr::GfMatrix4d& mtx,
                                              bool& foundTransformOp,
                                              pxr::UsdTimeCode timecode = pxr::UsdTimeCode::Default(),
                                              bool skipEqualSetForTimeSample = false)
    {
        pxr::UsdGeomXformable xform(prim);
        bool resetXFormStack;
        auto xformOps = xform.GetOrderedXformOps(&resetXFormStack);
        foundTransformOp = false;
        bool foundOtherOps = false;
        // Accumulated transforms of the ops on either side of the first transform op.
        pxr::GfMatrix4d preTransform = pxr::GfMatrix4d(1.);
        pxr::GfMatrix4d postTransform = pxr::GfMatrix4d(1.);

        for (auto xformOp : xformOps)
        {
            if (!foundTransformOp && xformOp.GetOpType() == pxr::UsdGeomXformOp::TypeTransform)
            {
                foundTransformOp = true;
            }
            else
            {
                bool isInverseOp = false;
                pxr::UsdGeomXformOp op(xformOp.GetAttr(), isInverseOp);
                if (op)
                {
                    // Pivot ops cancel with their inverse counterpart and are skipped here.
                    static const PXR_NS::TfToken kPivotSuffix("pivot");
                    if (op.HasSuffix(kPivotSuffix))
                    {
                        continue;
                    }
                    // possibly check for identity and skip multiplication
                    auto opTransform = op.GetOpTransform(timecode);
                    if (foundTransformOp)
                    {
                        preTransform = opTransform * preTransform;
                    }
                    else
                    {
                        postTransform = opTransform * postTransform;
                    }
                    foundOtherOps = true;
                }
            }
        }

        if (foundTransformOp && foundOtherOps)
        {
            // Algebraically: the returned inner matrix satisfies pre * inner * post == mtx.
            return preTransform.GetInverse() * mtx * postTransform.GetInverse();
        }
        return mtx;
    }

    /**
     * Sets local transform matrix of a prim.
     *
     * @param prim The prim to set local transform matrix to.
     * @param mtx The local transform matrix.
     */
    static bool setLocalTransformMatrix(pxr::UsdPrim prim,
                                        const pxr::GfMatrix4d& mtxIn,
                                        pxr::UsdTimeCode timecode = pxr::UsdTimeCode::Default(),
                                        bool skipEqualSetForTimeSample = false,
                                        std::unique_ptr<PXR_NS::SdfChangeBlock>* parentChangeBlock = nullptr)
    {
        // If prim is defined in session layer, we author in session layer.
        std::unique_ptr<PXR_NS::UsdEditContext> editCtx;
        auto mightDefOnSessionLayer = getLayerIfDefOnSessionOrItsSublayers(prim.GetStage(), prim.GetPath());
        if (mightDefOnSessionLayer)
        {
            editCtx = std::make_unique<PXR_NS::UsdEditContext>(prim.GetStage(), mightDefOnSessionLayer);
        }

        carb::settings::ISettings* settings = carb::getCachedInterface<carb::settings::ISettings>();
        // NOTE(review): fastUpdates is only captured by the findOrAdd lambda below and never read —
        // see the deprecation note further down; candidate for removal.
        bool fastUpdates = settings->getAsBool(kAuthorXformsWithFastUpdatesSettingPath);
        // Batch Sdf edits into the caller's change block when provided, otherwise into a local one.
        std::unique_ptr<PXR_NS::SdfChangeBlock> localChangeBlock;
        std::unique_ptr<PXR_NS::SdfChangeBlock>& changeBlock =
            (parentChangeBlock != nullptr) ? *parentChangeBlock : localChangeBlock;
        if (!changeBlock.get())
        {
            // https://github.com/PixarAnimationStudios/USD/commit/5e38b2aac0693fcf441a607165346e42cd625b59
            // fastUpdates have long been deprecated, and will be removed from the API
            // in nv-usd 22.05, as they not conflict with the "enabled" parameter
            // added by Pixar in USD v22.03
            changeBlock.reset(new PXR_NS::SdfChangeBlock());
        }
        pxr::UsdGeomXformable xform(prim);
        bool resetXFormStack;
        auto xformOps = xform.GetOrderedXformOps(&resetXFormStack);
        PXR_NS::VtTokenArray xformOpOrders;
        xform.GetXformOpOrderAttr().Get(&xformOpOrders);
        bool foundTransformOp = false;
        PXR_NS::UsdGeomXformOp transformOp; // NOTE(review): declared but never assigned; candidate for removal.
        bool success = true;
        pxr::GfMatrix4d mtx = mtxIn;
        // Compensate for the non-transform ops so the overall local transform still equals mtxIn.
        const pxr::GfMatrix4d innerMtx = findInnerTransform(prim, mtx, foundTransformOp, timecode,
                                                            skipEqualSetForTimeSample);
        for (auto xformOp : xformOps)
        {
            // Found transform op, trying to set its value
            if (xformOp.GetOpType() == pxr::UsdGeomXformOp::TypeTransform)
            {
                foundTransformOp = true;
                success &= UsdUtils::setAttribute(xformOp.GetAttr(), innerMtx, timecode, skipEqualSetForTimeSample);
            }
        }

        // If transformOp is not found, make individual xformOp or reuse old ones.
        if (!foundTransformOp)
        {
            // A.B.temp solution to unblock a showstopper for supporting unitsResolve suffix xformOp stacks
            static const PXR_NS::TfToken kUnitsResolve = PXR_NS::TfToken("unitsResolve");
            bool preTransformStack = false;
            std::vector<PXR_NS::UsdGeomXformOp> extraXformOps;
            for (auto& xformOp : xformOps)
            {
                if (xformOp.HasSuffix(kUnitsResolve))
                {
                    extraXformOps.push_back(xformOp);
                    if (xformOp == xformOps[0])
                    {
                        preTransformStack = true;
                    }
                }
            }
            // A.B.temp solution to unblock a showstopper for supporting unitsResolve suffix xformOp stacks
            // reconstruct the values back after modifying the incoming transform
            if (!extraXformOps.empty())
            {
                PXR_NS::GfMatrix4d extraTransform(1.0);
                for (auto& extraOp : extraXformOps)
                {
                    extraTransform *= extraOp.GetOpTransform(timecode);
                }
                const PXR_NS::GfMatrix4d extraTransformInv = extraTransform.GetInverse();
                if (preTransformStack)
                {
                    mtx = mtx * extraTransformInv;
                }
                else
                {
                    mtx = extraTransformInv * mtx;
                }
            }

            // Decompose the (possibly adjusted) matrix into translation / rotation / scale.
            pxr::GfVec3d translation;
            pxr::GfMatrix4d rotMat(1.0);
            pxr::GfVec3d doubleScale(1.0);
            pxr::GfMatrix4d scaleOrientMatUnused, perspMatUnused;
            mtx.Factor(&scaleOrientMatUnused, &doubleScale, &rotMat, &translation, &perspMatUnused);
            rotMat.Orthonormalize(false);
            pxr::GfRotation rotation = rotMat.ExtractRotation();

            // Don't use UsdGeomXformCommonAPI. It can only manipulate a very limited subset of xformOpOrder
            // combinations Do it manually as non-destructively as possible
            pxr::UsdGeomXformOp xformOp;
            std::vector<pxr::UsdGeomXformOp> newXformOps;

            if (!extraXformOps.empty() && preTransformStack)
            {
                for (auto& copyXformOp : extraXformOps)
                {
                    newXformOps.push_back(copyXformOp);
                }
            }

            // Reuses an existing op with a matching name, or (optionally) authors a new one.
            auto findOrAdd = [&xformOps, &xformOpOrders, &xform, &changeBlock, &fastUpdates](
                                 pxr::UsdGeomXformOp::Type xformOpType, pxr::UsdGeomXformOp& outXformOp,
                                 bool createIfNotExist, pxr::UsdGeomXformOp::Precision& precision,
                                 pxr::TfToken const& opSuffix = pxr::TfToken()) {
                for (auto xformOp : xformOps)
                {
                    if (xformOp.GetOpType() == xformOpType)
                    {
                        // To differentiate translate and translate:pivot
                        const pxr::TfToken expectedOpName = pxr::UsdGeomXformOp::GetOpName(xformOpType, opSuffix);
                        const pxr::TfToken opName = xformOp.GetOpName();
                        if (opName == expectedOpName)
                        {
                            precision = xformOp.GetPrecision();
                            outXformOp = xformOp;
                            return true;
                        }
                    }
                }
                if (createIfNotExist)
                {
                    // It is not safe to create new xformOps inside of SdfChangeBlocks, since
                    // new attribute creation via anything above Sdf API requires the PcpCache
                    // to be up to date. Flush the current change block before creating
                    // the new xformOp.
                    changeBlock.reset(nullptr);
                    if (std::find(xformOpOrders.begin(), xformOpOrders.end(),
                                  pxr::UsdGeomXformOp::GetOpName(xformOpType, opSuffix)) == xformOpOrders.end())
                        outXformOp = xform.AddXformOp(xformOpType, precision, opSuffix);
                    else
                    {
                        // Sometimes XformOp attributes and XformOpOrder don't match. GetOrderedXformOps() considers
                        // both XformOp attributes and XformOpOrder. But AddXformOp() considers only XformOpOrder.
                        // So we need to fix it here.
                        auto opAttr = xform.GetPrim().CreateAttribute(
                            pxr::UsdGeomXformOp::GetOpName(xformOpType, opSuffix),
                            pxr::UsdGeomXformOp::GetValueTypeName(xformOpType, precision), false);
                        outXformOp = pxr::UsdGeomXformOp(opAttr);
                    }
                    // Create a new change block to batch the subsequent authoring operations
                    // where possible.
                    changeBlock.reset(new PXR_NS::SdfChangeBlock());
                    // Creation may have failed for a variety of reasons (including instanceable=True)
                    return static_cast<bool>(outXformOp);
                }
                return false;
            };

            // Returns the type (and precision) of the first rotation-like op already on the prim.
            auto getFirstRotateOpType = [&xformOps](pxr::UsdGeomXformOp::Precision& precision) {
                for (auto xformOp : xformOps)
                {
                    if (xformOp.GetOpType() >= pxr::UsdGeomXformOp::Type::TypeRotateX &&
                        xformOp.GetOpType() <= pxr::UsdGeomXformOp::Type::TypeOrient &&
                        !xformOp.HasSuffix(kUnitsResolve))
                    {
                        precision = xformOp.GetPrecision();
                        return xformOp.GetOpType();
                    }
                }
                return pxr::UsdGeomXformOp::Type::TypeInvalid;
            };

            // Decomposes `rotation` along the given axes and writes it to the matching euler op.
            auto decomposeAndSetValue = [&rotation, &findOrAdd, &newXformOps](
                                            pxr::UsdGeomXformOp::Type rotationType, const pxr::GfVec3d& axis0,
                                            const pxr::GfVec3d& axis1, const pxr::GfVec3d& axis2, size_t xIndex,
                                            size_t yIndex, size_t zIndex, pxr::UsdGeomXformOp::Precision precision,
                                            pxr::UsdTimeCode timecode, bool skipEqualSetForTimeSample) {
                bool ret = false;
                pxr::GfVec3d angles = rotation.Decompose(axis0, axis1, axis2);
                pxr::GfVec3d rotate = { angles[xIndex], angles[yIndex], angles[zIndex] };
                pxr::UsdGeomXformOp xformOp;
                if (findOrAdd(rotationType, xformOp, true, precision))
                {
                    ret = setValueWithPrecision<pxr::GfVec3h, pxr::GfVec3f, pxr::GfVec3d, pxr::GfVec3d>(
                        xformOp, rotate, timecode, skipEqualSetForTimeSample);
                    newXformOps.push_back(xformOp);
                }
                return ret;
            };

            // Set translation
            pxr::UsdGeomXformOp::Precision precision = pxr::UsdGeomXformOp::PrecisionDouble;
            if (findOrAdd(pxr::UsdGeomXformOp::TypeTranslate, xformOp, true, precision))
            {
                success &= setValueWithPrecision<pxr::GfVec3h, pxr::GfVec3f, pxr::GfVec3d, pxr::GfVec3d>(
                    xformOp, translation, timecode, skipEqualSetForTimeSample);
                newXformOps.push_back(xformOp);
            }

            // Set pivot
            static const pxr::TfToken kPivot = pxr::TfToken("pivot");
            precision = pxr::UsdGeomXformOp::PrecisionFloat;
            pxr::UsdGeomXformOp pivotOp;
            pxr::UsdGeomXformOp pivotOpInv;
            pxr::GfVec3d pivotValue(0., 0., 0.); // NOTE(review): never read; candidate for removal.
            const bool hasPivot = findOrAdd(pxr::UsdGeomXformOp::TypeTranslate, pivotOp, false, precision, kPivot);
            if (hasPivot)
            {
                newXformOps.push_back(pivotOp);
                // Locate the matching inverse pivot op (scanned back-to-front).
                for (size_t k = xformOps.size(); k--;)
                {
                    if (xformOps[k].IsInverseOp() && xformOps[k].HasSuffix(kPivot))
                    {
                        pivotOpInv = xformOps[k];
                        break;
                    }
                }
            }

            // Set rotation
            precision = pxr::UsdGeomXformOp::PrecisionFloat;
            auto firstRotateOpType = getFirstRotateOpType(precision);
            if (firstRotateOpType == pxr::UsdGeomXformOp::TypeInvalid)
            {
                // No rotation op on the prim: pick the op type from the default-rotation-order preference.
                static const std::unordered_map<std::string, PXR_NS::UsdGeomXformOp::Type> s_typeMap{
                    { "XYZ", PXR_NS::UsdGeomXformOp::TypeRotateXYZ }, { "XZY", PXR_NS::UsdGeomXformOp::TypeRotateXZY },
                    { "YXZ", PXR_NS::UsdGeomXformOp::TypeRotateYXZ }, { "YZX", PXR_NS::UsdGeomXformOp::TypeRotateYZX },
                    { "ZXY", PXR_NS::UsdGeomXformOp::TypeRotateZXY }, { "ZYX", PXR_NS::UsdGeomXformOp::TypeRotateZYX },
                };
                firstRotateOpType = PXR_NS::UsdGeomXformOp::TypeRotateXYZ; // fallback
                const char* orderStr = carb::getCachedInterface<carb::settings::ISettings>()->getStringBuffer(
                    kDefaultRotationOrderSettingPath);
                const auto orderEntry = s_typeMap.find(orderStr);
                if (orderEntry != s_typeMap.end())
                {
                    firstRotateOpType = orderEntry->second;
                }
            }

            switch (firstRotateOpType)
            {
                case pxr::UsdGeomXformOp::TypeRotateX:
                case pxr::UsdGeomXformOp::TypeRotateY:
                case pxr::UsdGeomXformOp::TypeRotateZ:
                {
                    // Single-axis ops: author all three axes individually (ZYX decomposition).
                    pxr::GfVec3d angles =
                        rotation.Decompose(pxr::GfVec3d::ZAxis(), pxr::GfVec3d::YAxis(), pxr::GfVec3d::XAxis());
                    pxr::GfVec3d rotateZYX = { angles[2], angles[1], angles[0] };
                    if (findOrAdd(pxr::UsdGeomXformOp::TypeRotateZ, xformOp, true, precision))
                    {
                        success &= setValueWithPrecision<pxr::GfHalf, float, double, double>(
                            xformOp, rotateZYX[2], timecode, skipEqualSetForTimeSample);
                        newXformOps.push_back(xformOp);
                    }
                    if (findOrAdd(pxr::UsdGeomXformOp::TypeRotateY, xformOp, true, precision))
                    {
                        success &= setValueWithPrecision<pxr::GfHalf, float, double, double>(
                            xformOp, rotateZYX[1], timecode, skipEqualSetForTimeSample);
                        newXformOps.push_back(xformOp);
                    }
                    if (findOrAdd(pxr::UsdGeomXformOp::TypeRotateX, xformOp, true, precision))
                    {
                        success &= setValueWithPrecision<pxr::GfHalf, float, double, double>(
                            xformOp, rotateZYX[0], timecode, skipEqualSetForTimeSample);
                        newXformOps.push_back(xformOp);
                    }
                    break;
                }
                case pxr::UsdGeomXformOp::TypeRotateZYX:
                    success &= decomposeAndSetValue(firstRotateOpType, pxr::GfVec3d::XAxis(), pxr::GfVec3d::YAxis(),
                                                    pxr::GfVec3d::ZAxis(), 0, 1, 2, precision, timecode,
                                                    skipEqualSetForTimeSample);
                    break;
                case pxr::UsdGeomXformOp::TypeRotateXZY:
                    success &= decomposeAndSetValue(firstRotateOpType, pxr::GfVec3d::YAxis(), pxr::GfVec3d::ZAxis(),
                                                    pxr::GfVec3d::XAxis(), 2, 0, 1, precision, timecode,
                                                    skipEqualSetForTimeSample);
                    break;
                case pxr::UsdGeomXformOp::TypeRotateYXZ:
                    success &= decomposeAndSetValue(firstRotateOpType, pxr::GfVec3d::ZAxis(), pxr::GfVec3d::XAxis(),
                                                    pxr::GfVec3d::YAxis(), 1, 2, 0, precision, timecode,
                                                    skipEqualSetForTimeSample);
                    break;
                case pxr::UsdGeomXformOp::TypeRotateYZX:
                    success &= decomposeAndSetValue(firstRotateOpType, pxr::GfVec3d::XAxis(), pxr::GfVec3d::ZAxis(),
                                                    pxr::GfVec3d::YAxis(), 0, 2, 1, precision, timecode,
                                                    skipEqualSetForTimeSample);
                    break;
                case pxr::UsdGeomXformOp::TypeRotateZXY:
                    success &= decomposeAndSetValue(firstRotateOpType, pxr::GfVec3d::YAxis(), pxr::GfVec3d::XAxis(),
                                                    pxr::GfVec3d::ZAxis(), 1, 0, 2, precision, timecode,
                                                    skipEqualSetForTimeSample);
                    break;
                case pxr::UsdGeomXformOp::TypeOrient:
                    // Orient ops are only reused, never created (createIfNotExist == false).
                    if (findOrAdd(pxr::UsdGeomXformOp::TypeOrient, xformOp, false, precision))
                    {
                        success &= setValueWithPrecision<pxr::GfQuath, pxr::GfQuatf, pxr::GfQuatd, pxr::GfQuatd>(
                            xformOp, rotation.GetQuat(), timecode, skipEqualSetForTimeSample);
                        newXformOps.push_back(xformOp);
                    }
                    break;
                case pxr::UsdGeomXformOp::TypeRotateXYZ:
                default:
                    success &= decomposeAndSetValue(pxr::UsdGeomXformOp::TypeRotateXYZ, pxr::GfVec3d::ZAxis(),
                                                    pxr::GfVec3d::YAxis(), pxr::GfVec3d::XAxis(), 2, 1, 0, precision,
                                                    timecode, skipEqualSetForTimeSample);
                    break;
            }

            // Set scale
            precision = pxr::UsdGeomXformOp::PrecisionFloat;
            if (findOrAdd(pxr::UsdGeomXformOp::TypeScale, xformOp, true, precision))
            {
                success &= setValueWithPrecision<pxr::GfVec3h, pxr::GfVec3f, pxr::GfVec3d, pxr::GfVec3d>(
                    xformOp, doubleScale, timecode, skipEqualSetForTimeSample);
                newXformOps.push_back(xformOp);
            }

            // Set extra ops from units resolve
            if (!extraXformOps.empty() && !preTransformStack)
            {
                for (auto& copyXformOp : extraXformOps)
                {
                    newXformOps.push_back(copyXformOp);
                }
            }

            // Set inverse pivot
            if (hasPivot && pivotOpInv)
            {
                // Assume the last xformOps is the pivot
                newXformOps.push_back(pivotOpInv);
            }

            // Author the final op order reflecting everything pushed above.
            success &= xform.SetXformOpOrder(newXformOps, resetXFormStack);
        }
        return success;
    }

    /**
     * Builds a local transform matrix from decomposed components, applied scale-rotate-translate.
     *
     * @param translation Translation component.
     * @param rotationEuler Euler angles in degrees, indexed by axis (x, y, z).
     * @param rotationOrder Axis application order, e.g. {0, 1, 2} for XYZ.
     * @param scale Per-axis scale component.
     * @return The composed matrix scaleMtx * rotMtx * transMtx.
     */
    static PXR_NS::GfMatrix4d constructTransformMatrixFromSRT(const PXR_NS::GfVec3d& translation,
                                                              const PXR_NS::GfVec3d& rotationEuler,
                                                              const PXR_NS::GfVec3i& rotationOrder,
                                                              const PXR_NS::GfVec3d& scale)
    {
        using namespace PXR_NS;
        GfMatrix4d transMtx, rotMtx, scaleMtx;
        transMtx.SetTranslate(translation);
        static const GfVec3d kAxes[] = { GfVec3d::XAxis(), GfVec3d::YAxis(), GfVec3d::ZAxis() };
        GfRotation rotation = GfRotation(kAxes[rotationOrder[0]], rotationEuler[rotationOrder[0]]) *
                              GfRotation(kAxes[rotationOrder[1]], rotationEuler[rotationOrder[1]]) *
                              GfRotation(kAxes[rotationOrder[2]], rotationEuler[rotationOrder[2]]);
        rotMtx.SetRotate(rotation);
        scaleMtx.SetScale(scale);
        return scaleMtx * rotMtx * transMtx;
    }

    /**
     * Sets local transform of a prim from decomposed SRT components.
     * (NOTE(review): this header previously duplicated the matrix-setter doc; reworded to match the
     * SRT overload that follows.)
     *
     * @param prim The prim to set local transform on.
     * @param translationIn Translation to author.
     * @param rotationEulerIn Euler rotation (degrees) to author.
     * @param rotationOrder Rotation axis application order.
     * @param scaleIn Scale to author.
     * @param timecode Timecode at which values are authored.
     * @param skipEqualSetForTimeSample Forwarded to the value setters; presumably skips redundant
     *        time-sample writes — confirm in UsdUtils::setAttribute.
     * @param parentChangeBlock Optional caller-owned change block to batch edits into.
*/
    static bool setLocalTransformSRT(PXR_NS::UsdPrim prim,
                                     const PXR_NS::GfVec3d& translationIn,
                                     const PXR_NS::GfVec3d& rotationEulerIn,
                                     const PXR_NS::GfVec3i& rotationOrder,
                                     const PXR_NS::GfVec3d& scaleIn,
                                     PXR_NS::UsdTimeCode timecode = PXR_NS::UsdTimeCode::Default(),
                                     bool skipEqualSetForTimeSample = false,
                                     std::unique_ptr<PXR_NS::SdfChangeBlock>* parentChangeBlock = nullptr)
    {
        using namespace PXR_NS;

        // If prim is defined in session layer, we author in session layer.
        std::unique_ptr<PXR_NS::UsdEditContext> editCtx;
        auto mightDefOnSessionLayer = getLayerIfDefOnSessionOrItsSublayers(prim.GetStage(), prim.GetPath());
        if (mightDefOnSessionLayer)
        {
            editCtx = std::make_unique<PXR_NS::UsdEditContext>(prim.GetStage(), mightDefOnSessionLayer);
        }

        carb::settings::ISettings* settings = carb::getCachedInterface<carb::settings::ISettings>();
        // NOTE(review): fastUpdates is captured by the findOrAdd lambda below but never read there —
        // looks vestigial; confirm before removing.
        bool fastUpdates = settings->getAsBool(kAuthorXformsWithFastUpdatesSettingPath);

        // Batch Sdf notifications. If the caller supplied a change block, reuse (and possibly
        // re-create) it so the caller controls the batching lifetime; otherwise use a local one.
        std::unique_ptr<SdfChangeBlock> localChangeBlock;
        std::unique_ptr<SdfChangeBlock>& changeBlock =
            (parentChangeBlock != nullptr) ? *parentChangeBlock : localChangeBlock;
        if (!changeBlock.get())
        {
            changeBlock.reset(new SdfChangeBlock());
        }

        UsdGeomXformable xform(prim);
        bool resetXFormStack;
        auto xformOps = xform.GetOrderedXformOps(&resetXFormStack);
        PXR_NS::VtTokenArray xformOpOrders;
        xform.GetXformOpOrderAttr().Get(&xformOpOrders);
        bool foundTransformOp = false;
        UsdGeomXformOp transformOp;
        bool success = true;
        // Local working copies — they may be rewritten below to compensate for unitsResolve ops.
        PXR_NS::GfVec3d translation = translationIn;
        PXR_NS::GfVec3d rotationEuler = rotationEulerIn;
        PXR_NS::GfVec3d scale = scaleIn;

        // A.B. temp solution to unblock a showstopper for supporting unitsResolve suffix xformOp stacks.
        // Scan the ordered ops once: remember a monolithic transform op if present, and collect any
        // "unitsResolve"-suffixed ops (noting whether they sit at the front of the stack).
        static const TfToken kUnitsResolve = TfToken("unitsResolve");
        bool preTransformStack = false;
        std::vector<UsdGeomXformOp> extraXformOps;
        for (auto& xformOp : xformOps)
        {
            // Found transform op, trying to set its value
            if (xformOp.GetOpType() == UsdGeomXformOp::TypeTransform)
            {
                foundTransformOp = true;
                transformOp = xformOp;
            }
            else if (xformOp.HasSuffix(kUnitsResolve))
            {
                extraXformOps.push_back(xformOp);
                if (xformOp == xformOps[0])
                {
                    preTransformStack = true;
                }
            }
        }

        // A.B. temp solution to unblock a showstopper for supporting unitsResolve suffix xformOp stacks:
        // reconstruct the values back after modifying the incoming transform.
        if (!extraXformOps.empty() && !foundTransformOp)
        {
            if (preTransformStack)
            {
                // unitsResolve ops precede the SRT stack: fold their inverse into the requested SRT,
                // then re-decompose so the authored SRT composes back to the caller's intent.
                GfMatrix4d mtx = constructTransformMatrixFromSRT(translation, rotationEuler, rotationOrder, scale);
                GfMatrix4d extraTransform(1.0);
                for (auto& extraOp : extraXformOps)
                {
                    extraTransform *= extraOp.GetOpTransform(timecode);
                }
                const GfMatrix4d extraTransformInv = extraTransform.GetInverse();
                mtx = mtx * extraTransformInv;
                PXR_NS::GfVec3d translationNew;
                PXR_NS::GfVec3d rotationNew;
                PXR_NS::GfMatrix4d rotMat(1.0);
                PXR_NS::GfVec3d scaleNew(1.0);
                PXR_NS::GfMatrix4d scaleOrientMatUnused, perspMatUnused;
                mtx.Factor(&scaleOrientMatUnused, &scaleNew, &rotMat, &translationNew, &perspMatUnused);
                static const PXR_NS::GfVec3d kAxis[] = { PXR_NS::GfVec3d::XAxis(), PXR_NS::GfVec3d::YAxis(),
                                                         PXR_NS::GfVec3d::ZAxis() };
                // Decompose expects axes from outermost to innermost, hence the reversed order.
                auto decompRot = rotMat.ExtractRotation().Decompose(
                    kAxis[rotationOrder[2]], kAxis[rotationOrder[1]], kAxis[rotationOrder[0]]);
                // Map the decomposed angles back to X/Y/Z component slots.
                PXR_NS::GfVec3i indexOrder;
                for (int32_t i = 0; i < 3; i++)
                {
                    indexOrder[rotationOrder[i]] = 2 - i;
                }
                rotationNew[0] = decompRot[indexOrder[0]];
                rotationNew[1] = decompRot[indexOrder[1]];
                rotationNew[2] = decompRot[indexOrder[2]];
                translation = translationNew;
                rotationEuler = rotationNew;
                scale = scaleNew;
            }
            else
            {
                // Post transform, we should know what to do
                for (const UsdGeomXformOp& extraOp : extraXformOps)
                {
                    if (extraOp.GetOpType() == UsdGeomXformOp::Type::TypeScale)
                    {
                        PXR_NS::GfVec3d scaleValue;
                        extraOp.GetAs<>(&scaleValue, timecode);
                        scale = PXR_NS::GfCompDiv(scale, scaleValue);
                    }
                    else if (extraOp.GetOpType() == UsdGeomXformOp::Type::TypeRotateX)
                    {
                        double rotValue;
                        extraOp.GetAs<>(&rotValue, timecode);
                        // NOTE(review): unqualified abs() on a double — confirm this resolves to the
                        // floating-point overload and not int ::abs().
                        if (PXR_NS::GfIsClose(abs(rotValue), 90.0, 0.01))
                        {
                            rotationEuler[0] = rotationEuler[0] - rotValue;
                            // A +-90 degree X rotation exchanges the Y and Z scale axes.
                            std::swap(scale[1], scale[2]);
                        }
                        else
                        {
                            CARB_LOG_WARN("UnitsResolve rotateX supports only +-90 degree on prim: %s",
                                          prim.GetPrimPath().GetText());
                        }
                    }
                }
            }
        }

        // If transformOp is not found, make individual xformOp or reuse old ones.
        if (!foundTransformOp)
        {
            // Don't use UsdGeomXformCommonAPI. It can only manipulate a very limited subset of xformOpOrder
            // combinations. Do it manually as non-destructively as possible.
            UsdGeomXformOp xformOp;
            std::vector<UsdGeomXformOp> newXformOps;
            newXformOps.reserve(5); // SRT + pivot and pivot inv
            // Pre-stack unitsResolve ops stay at the front of the rebuilt order.
            if (!extraXformOps.empty() && preTransformStack)
            {
                for (auto& copyXformOp : extraXformOps)
                {
                    newXformOps.push_back(copyXformOp);
                }
            }

            // Returns (via outXformOp) an existing op of the given type/suffix, or — when
            // createIfNotExist is set — creates one. precision is in-out: read when creating,
            // overwritten with the existing op's precision when found.
            auto findOrAdd = [&xformOps, &xformOpOrders, &xform, &changeBlock, &fastUpdates](
                                 UsdGeomXformOp::Type xformOpType, UsdGeomXformOp& outXformOp, bool createIfNotExist,
                                 UsdGeomXformOp::Precision& precision, TfToken const& opSuffix = TfToken()) {
                TfToken expectedOpName = UsdGeomXformOp::GetOpName(xformOpType, opSuffix);
                for (auto& xformOp : xformOps)
                {
                    if (xformOp.GetOpType() == xformOpType)
                    {
                        // To differentiate translate and translate:pivot
                        TfToken opName = xformOp.GetOpName();
                        if (opName == expectedOpName)
                        {
                            precision = xformOp.GetPrecision();
                            outXformOp = xformOp;
                            return true;
                        }
                    }
                }
                if (createIfNotExist)
                {
                    // It is not safe to create new xformOps inside of SdfChangeBlocks, since
                    // new attribute creation via anything above Sdf API requires the PcpCache
                    // to be up to date. Flush the current change block before creating
                    // the new xformOp.
                    changeBlock.reset(nullptr);
                    if (std::find(xformOpOrders.begin(), xformOpOrders.end(), expectedOpName) == xformOpOrders.end())
                    {
                        outXformOp = xform.AddXformOp(xformOpType, precision, opSuffix);
                    }
                    else
                    {
                        // Sometimes XformOp attributes and XformOpOrder don't match. GetOrderedXformOps() considers
                        // both XformOp attributes and XformOpOrder. But AddXformOp() considers only XformOpOrder. So we
                        // need to fix it here.
                        auto opAttr = xform.GetPrim().CreateAttribute(
                            expectedOpName, UsdGeomXformOp::GetValueTypeName(xformOpType, precision), false);
                        outXformOp = UsdGeomXformOp(opAttr);
                    }
                    // Create a new change block to batch the subsequent authoring operations
                    // where possible.
                    changeBlock.reset(new SdfChangeBlock());
                    // Creation may have failed for a variety of reasons (including instanceable=True)
                    return static_cast<bool>(outXformOp);
                }
                return false;
            };

            // First non-unitsResolve rotation-family op (rotateX..orient) in the stack, if any;
            // also reports its precision through the in-out parameter.
            auto getFirstRotateOpType = [&xformOps](UsdGeomXformOp::Precision& precision) {
                for (auto& xformOp : xformOps)
                {
                    if (xformOp.GetOpType() >= UsdGeomXformOp::Type::TypeRotateX &&
                        xformOp.GetOpType() <= UsdGeomXformOp::Type::TypeOrient && !xformOp.HasSuffix(kUnitsResolve))
                    {
                        precision = xformOp.GetPrecision();
                        return xformOp.GetOpType();
                    }
                }
                return UsdGeomXformOp::Type::TypeInvalid;
            };

            // Writes rotationEuler to a triple-axis rotate op of the given order, appending the op
            // to the rebuilt order on success.
            auto setEulerValue = [&rotationEuler, &findOrAdd, &newXformOps](
                                     UsdGeomXformOp::Type rotationType, UsdGeomXformOp::Precision precision,
                                     UsdTimeCode timecode, bool skipEqualSetForTimeSample) {
                bool ret = false;
                UsdGeomXformOp xformOp;
                if (findOrAdd(rotationType, xformOp, true, precision))
                {
                    ret = setValueWithPrecision<GfVec3h, GfVec3f, GfVec3d, GfVec3d>(
                        xformOp, rotationEuler, timecode, skipEqualSetForTimeSample);
                    newXformOps.push_back(xformOp);
                }
                return ret;
            };

            // Set translation
            UsdGeomXformOp::Precision precision = UsdGeomXformOp::PrecisionDouble;
            if (findOrAdd(UsdGeomXformOp::TypeTranslate, xformOp, true, precision))
            {
                success &= setValueWithPrecision<GfVec3h, GfVec3f, GfVec3d, GfVec3d>(
                    xformOp, translation, timecode, skipEqualSetForTimeSample);
                newXformOps.push_back(xformOp);
            }

            // Set pivot (never created here — only preserved if it already exists).
            precision = UsdGeomXformOp::PrecisionFloat;
            UsdGeomXformOp pivotOp;
            static const TfToken kPivot = TfToken("pivot");
            const bool hasPivot = findOrAdd(UsdGeomXformOp::TypeTranslate, pivotOp, false, precision, kPivot);
            UsdGeomXformOp pivotOpInv;
            if (hasPivot)
            {
                newXformOps.push_back(pivotOp);
                // Search backwards for the matching inverse pivot op, re-appended at the end below.
                for (size_t k = xformOps.size(); k--;)
                {
                    if (xformOps[k].IsInverseOp() && xformOps[k].HasSuffix(kPivot))
                    {
                        pivotOpInv = xformOps[k];
                        break;
                    }
                }
            }

            // Set rotation
            precision = UsdGeomXformOp::PrecisionFloat;
            auto firstRotateOpType = getFirstRotateOpType(precision);
            struct HashFn
            {
                size_t operator()(const GfVec3i& vec) const
                {
                    return hash_value(vec);
                }
            };
            // Maps a (x,y,z)->index rotation-order triple to the corresponding rotate op type.
            static const std::unordered_map<GfVec3i, UsdGeomXformOp::Type, HashFn> kRotationOrderToTypeMap{
                { { 0, 1, 2 }, UsdGeomXformOp::TypeRotateXYZ }, { { 0, 2, 1 }, UsdGeomXformOp::TypeRotateXZY },
                { { 1, 0, 2 }, UsdGeomXformOp::TypeRotateYXZ }, { { 1, 2, 0 }, UsdGeomXformOp::TypeRotateYZX },
                { { 2, 0, 1 }, UsdGeomXformOp::TypeRotateZXY }, { { 2, 1, 0 }, UsdGeomXformOp::TypeRotateZYX },
            };
            if (firstRotateOpType == UsdGeomXformOp::TypeInvalid)
            {
                // No rotation op exists yet — pick one from the app's default xform-op-type setting,
                // falling back to the caller-requested rotation order.
                const char* defaultOpXformType =
                    carb::getCachedInterface<carb::settings::ISettings>()->getStringBuffer(kDefaultXforOpTypeSettingPath);
                if (defaultOpXformType &&
                    strncmp(defaultOpXformType, "Scale, Orient, Translate", sizeof("Scale, Orient, Translate") - 1) == 0)
                {
                    firstRotateOpType = UsdGeomXformOp::TypeOrient;
                }
                else
                {
                    // TODO what if default_xform_ops == "Transform" ?
                    // If no rotation was defined on the prim, use rotationOrder as default
                    const auto orderEntry = kRotationOrderToTypeMap.find(rotationOrder);
                    if (orderEntry != kRotationOrderToTypeMap.end())
                    {
                        firstRotateOpType = orderEntry->second;
                    }
                }
            }
            switch (firstRotateOpType)
            {
                case UsdGeomXformOp::TypeRotateX:
                case UsdGeomXformOp::TypeRotateY:
                case UsdGeomXformOp::TypeRotateZ:
                {
                    // Single-axis ops: author one op per axis. Add in reverse order
                    for (int32_t i = 2; i >= 0; i--)
                    {
                        size_t axis = rotationOrder[i];
                        if (findOrAdd(static_cast<UsdGeomXformOp::Type>(UsdGeomXformOp::TypeRotateX + axis), xformOp,
                                      true, precision))
                        {
                            success &= setValueWithPrecision<GfHalf, float, double, double>(
                                xformOp, rotationEuler[axis], timecode, skipEqualSetForTimeSample);
                            newXformOps.push_back(xformOp);
                        }
                    }
                    break;
                }
                case UsdGeomXformOp::TypeRotateXYZ:
                case UsdGeomXformOp::TypeRotateXZY:
                case UsdGeomXformOp::TypeRotateYXZ:
                case UsdGeomXformOp::TypeRotateYZX:
                case UsdGeomXformOp::TypeRotateZXY:
                case UsdGeomXformOp::TypeRotateZYX:
                {
                    // Triple-axis op: prefer the caller's rotation order, warn if it differs from
                    // what is already authored on the prim.
                    UsdGeomXformOp::Type providedRotationOrder = firstRotateOpType;
                    const auto orderEntry = kRotationOrderToTypeMap.find(rotationOrder);
                    if (orderEntry != kRotationOrderToTypeMap.end())
                    {
                        providedRotationOrder = orderEntry->second;
                    }
                    if (providedRotationOrder != firstRotateOpType)
                    {
                        CARB_LOG_WARN(
                            "Existing rotation order on prim %s is different than desired (%d, %d, %d), overriding...",
                            prim.GetPrimPath().GetText(), rotationOrder[0], rotationOrder[1], rotationOrder[2]);
                    }
                    success &= setEulerValue(providedRotationOrder, precision, timecode, skipEqualSetForTimeSample);
                    break;
                }
                case UsdGeomXformOp::TypeOrient:
                    // Quaternion op: compose the three axis rotations in the requested order.
                    if (findOrAdd(UsdGeomXformOp::TypeOrient, xformOp, true, precision))
                    {
                        static const GfVec3d kAxes[] = { GfVec3d::XAxis(), GfVec3d::YAxis(), GfVec3d::ZAxis() };
                        GfRotation rotation = GfRotation(kAxes[rotationOrder[0]], rotationEuler[rotationOrder[0]]) *
                                              GfRotation(kAxes[rotationOrder[1]], rotationEuler[rotationOrder[1]]) *
                                              GfRotation(kAxes[rotationOrder[2]], rotationEuler[rotationOrder[2]]);
                        success &= setValueWithPrecision<GfQuath, GfQuatf, GfQuatd, GfQuatd>(
                            xformOp, rotation.GetQuat(), timecode, skipEqualSetForTimeSample);
                        newXformOps.push_back(xformOp);
                    }
                    break;
                default:
                    CARB_LOG_ERROR("Failed to determine rotation order");
            }

            // Set scale
            precision = UsdGeomXformOp::PrecisionFloat;
            if (findOrAdd(UsdGeomXformOp::TypeScale, xformOp, true, precision))
            {
                success &= setValueWithPrecision<GfVec3h, GfVec3f, GfVec3d, GfVec3d>(
                    xformOp, scale, timecode, skipEqualSetForTimeSample);
                newXformOps.push_back(xformOp);
            }

            // Set extra ops from units resolve (post-stack variants go after the SRT ops).
            if (!extraXformOps.empty() && !preTransformStack)
            {
                for (auto& copyXformOp : extraXformOps)
                {
                    newXformOps.push_back(copyXformOp);
                }
            }

            // Set inverse pivot
            if (hasPivot && pivotOpInv)
            {
                newXformOps.push_back(pivotOpInv);
            }

            success &= xform.SetXformOpOrder(newXformOps, resetXFormStack);
        }
        else
        {
            // A monolithic transform op exists: author the composed matrix directly.
            const GfMatrix4d mtx = constructTransformMatrixFromSRT(translation, rotationEuler, rotationOrder, scale);
            const GfMatrix4d innerMtx = findInnerTransform(prim, mtx, foundTransformOp, timecode, skipEqualSetForTimeSample);
            success &= UsdUtils::setAttribute(transformOp.GetAttr(), innerMtx, timecode, skipEqualSetForTimeSample);
        }
        return success;
    }

    /**
     * Sets local transform of a prim from its world transform matrix.
     *
     * @param prim The prim to set local transform matrix to.
     * @param mtx The world transform matrix.
     * @param timeCode Current timecode.
     * @param skipEqualSetForTimeSample If true, skips authoring a timesample when the value is unchanged.
     */
    static bool setLocalTransformFromWorldTransformMatrix(pxr::UsdPrim prim,
                                                          const pxr::GfMatrix4d& mtx,
                                                          pxr::UsdTimeCode timeCode = pxr::UsdTimeCode::Default(),
                                                          bool skipEqualSetForTimeSample = false)
    {
        // local = world * inverse(parent-to-world)
        pxr::GfMatrix4d parentToWorldMat = pxr::UsdGeomXformable(prim).ComputeParentToWorldTransform(timeCode);
        pxr::GfMatrix4d worldToParentMat = parentToWorldMat.GetInverse();
        return UsdUtils::setLocalTransformMatrix(prim, mtx * worldToParentMat, timeCode, skipEqualSetForTimeSample);
    }

    /**
     * Creates a reference prim from external USD asset.
     *
     * @param stage The stage to create reference prim on.
     * @param refUrl The URL of the reference asset.
     * @param primPath The path to create the reference prim at.
     * @param warningMsg Output warning message during the process.
     * @param prependDefaultPrimPath Whether to check under defaultPrim path or stage root.
     * @param adjustLayerOffset Whether to scale the layer offset to compensate for differing timeCodesPerSecond.
     * @return The created prim, or an invalid prim on failure (warningMsg is then set).
     */
    static pxr::UsdPrim createExternalRefNodeAtPath(pxr::UsdStageWeakPtr stage,
                                                    const char* refUrl,
                                                    const char* primPath,
                                                    std::string& warningMsg,
                                                    bool prependDefaultPrimPath = true,
                                                    bool adjustLayerOffset = true)
    {
        pxr::UsdPrim prim;
        std::string newPrimPath = findNextNoneExisitingNodePath(stage, primPath, prependDefaultPrimPath);
        // Normalize separators before computing a layer-relative URL.
        std::string refUrlStr(refUrl);
        std::replace(refUrlStr.begin(), refUrlStr.end(), '\\', '/');
        std::string relativeUrl = refUrlStr;
        if (!makePathRelativeToLayer(stage->GetEditTarget().GetLayer(), relativeUrl))
        {
#if CARB_PLATFORM_LINUX
            // makePathRelativeToLayer returns false, it is an absolute path.
            // Ensure scheme-less absolute paths become file:// URLs.
            auto omniclient = carb::getCachedInterface<carb::omniclient::IOmniClient>();
            carb::omniclient::OmniClientUrlPtr url(omniclient, relativeUrl.c_str());
            if (!url->scheme || url->scheme[0] == '\0')
            {
                relativeUrl = carb::omniclient::makeFileUrl(omniclient, relativeUrl.c_str());
            }
#endif
        }
        carb::settings::ISettings* settings = carb::getFramework()->acquireInterface<carb::settings::ISettings>();
        bool xformForTypelessRef = settings->getAsBool(kCreateXformForTypelessReferenceSettingPath);
        bool explictRefForNoneDefaultPrim = settings->getAsBool(kCreateExplicitRefForNoneDefaultPrimSettingPath);
        // Open without loading payloads — only metadata (defaultPrim, timeline) is inspected.
        pxr::UsdStageRefPtr refStage = pxr::UsdStage::Open(refUrl, pxr::UsdStage::InitialLoadSet::LoadNone);
        if (refStage)
        {
            PXR_NS::SdfLayerOffset layerOffset;
            if (adjustLayerOffset)
                layerOffset.SetScale(stage->GetTimeCodesPerSecond() / refStage->GetTimeCodesPerSecond());
            if (refStage->HasDefaultPrim() && refStage->GetDefaultPrim())
            {
                auto defaultPrim = refStage->GetDefaultPrim();
                const std::string& typeName = defaultPrim.GetTypeName().GetString();
                if (typeName.empty() && xformForTypelessRef)
                {
                    warningMsg =
                        "The defaultPrim of this USD is typeless. Overriding it to Xform. Do not rely on this behavior, it will be removed at anytime.";
                    // If the defaultPrim is typeless,
                    // define a root Xform node for the external refs so it can be moved around
                    prim = pxr::UsdGeomXform::Define(stage, pxr::SdfPath(newPrimPath)).GetPrim();
                }
                else
                {
                    prim = stage->DefinePrim(pxr::SdfPath(newPrimPath));
                }
                if (prim)
                {
                    // If has DefaultPrim, reference it directly.
                    auto ref = prim.GetReferences();
                    ref.AddReference(relativeUrl, layerOffset);
                }
            }
            else if (explictRefForNoneDefaultPrim)
            {
                if (refStage->HasDefaultPrim() && !refStage->GetDefaultPrim())
                {
                    warningMsg =
                        "The USD file has a defaultPrim but references an invalid prim, creating explicit reference for each root-level object. Do not rely on this behavior, it will be removed at anytime.";
                }
                else if (!refStage->HasDefaultPrim())
                {
                    warningMsg =
                        "There's no defaultPrim in this USD file, creating explicit reference for each root-level object. Do not rely on this behavior, it will be removed at anytime.";
                }
                // Define a root Xform node for the external refs
                prim = pxr::UsdGeomXform::Define(stage, pxr::SdfPath(newPrimPath)).GetPrim();
                // If has no defaultPrim, reference each root level Prim individually.
                auto refRoot = refStage->GetPseudoRoot();
                auto refRootDirectChildren = refRoot.GetAllChildren();
                // iterate through all root level children and create reference for them
                for (const auto& child : refRootDirectChildren)
                {
                    std::string newPath = newPrimPath + child.GetPrimPath().GetString();
                    auto subPrim = stage->DefinePrim(pxr::SdfPath(newPath));
                    if (subPrim)
                    {
                        auto ref = subPrim.GetReferences();
                        ref.AddReference(relativeUrl, child.GetPrimPath(), layerOffset);
                    }
                    else
                    {
                        CARB_LOG_WARN("Failed to import %s from %s", child.GetPrimPath().GetText(), refUrl);
                    }
                }
            }
            if (prim)
            {
                // Get the timeline of the referenced stage and extend current stage timeline if needed.
                double refStartTimeCode = refStage->GetStartTimeCode();
                double refEndTimeCode = refStage->GetEndTimeCode();
                double refTimeCodePerSecond = refStage->GetTimeCodesPerSecond();
                double startTimeCode = stage->GetStartTimeCode();
                double endTimeCode = stage->GetEndTimeCode();
                double timeCodePerSecond = stage->GetTimeCodesPerSecond();
                if (refStartTimeCode < startTimeCode)
                {
                    // Timeline metadata is always authored on the root layer.
                    UsdUtils::ScopedLayerEdit scopedLayerEdit(stage, stage->GetRootLayer());
                    stage->SetStartTimeCode(refStartTimeCode);
                }
                if (refEndTimeCode > endTimeCode)
                {
                    UsdUtils::ScopedLayerEdit scopedLayerEdit(stage, stage->GetRootLayer());
                    stage->SetEndTimeCode(refEndTimeCode);
                }
                // Also warn if fps of referenced stage differs.
                if (refTimeCodePerSecond != timeCodePerSecond)
                {
                    warningMsg = "Referenced USD file TimeCodesPerSecond does not match current USD file TimeCodesPerSecond.";
                }
            }
        }
        if (!prim)
        {
            warningMsg = std::string("Could not create reference from ") + refUrl;
        }
        return prim;
    }

    /**
     * Gets the enclosing model of given prim.
     *
     * @param prim The prim to get enclosing model from.
     * @return The enclosing model of given prim, or an invalid prim if no ancestor has a model kind.
     */
    static pxr::UsdPrim getEnclosingModelPrim(pxr::UsdPrim prim)
    {
        // Code partially ported from USD\pxr\usdImaging\lib\usdviewq\common.py
        // Walk up the hierarchy until a prim whose kind IsA "model" is found.
        while (prim)
        {
            pxr::UsdModelAPI modelApi(prim);
            pxr::TfToken kind;
            modelApi.GetKind(&kind);
            if (pxr::KindRegistry::IsA(kind, pxr::KindTokens->model))
            {
                break;
            }
            prim = prim.GetParent();
        }
        return prim;
    }

    /**
     * Takes an absolute path and makes it relative to a layer.
     *
     * @param layer The layer to make path relative to.
     * @param path The absolute path to be converted in place.
     * @return true if the conversion is successful, false if failed to make relative path (possible reasons: layer is
     * in-memory only, layer and path have different protocol (omni: vs file:), layer and path are on different Windows
     * drive etc.).
     */
    static bool makePathRelativeToLayer(pxr::SdfLayerHandle layer, std::string& path)
    {
        // Anonymous layers have no on-disk location to be relative to.
        if (layer->IsAnonymous() || PXR_NS::SdfLayer::IsAnonymousLayerIdentifier(path))
        {
            return false;
        }
        std::string realPath = layer->GetRealPath();
        std::string externRefPathRelative;
        if (makeRelativePathTo(path, realPath, externRefPathRelative))
        {
            path = externRefPathRelative;
            return true;
        }
        else
        {
            CARB_LOG_INFO("Cannot make '%s' relative to '%s'", path.c_str(), realPath.c_str());
        }
        return false;
    }

    /**
     * Removes a property from all layer specs.
     *
     * @param stage The stage to remove property.
     * @param primPath The parent prim path of the property to be removed.
     * @param propertyName Name of the property to be removed.
     * @return true if property has been removed from at least one layer.
     */
    static bool removeProperty(pxr::UsdStageWeakPtr stage, const pxr::SdfPath& primPath, const pxr::TfToken& propertyName)
    {
        bool ret = false;
        // Iterate the stage's full layer stack so the property disappears from every opinion.
        auto layerStack = stage->GetLayerStack();
        for (auto&& layer : layerStack)
        {
            auto primSpec = layer->GetPrimAtPath(primPath);
            if (primSpec)
            {
                auto propertySpec = layer->GetPropertyAtPath(primPath.AppendProperty(propertyName));
                if (propertySpec)
                {
                    primSpec->RemoveProperty(propertySpec);
                    ret |= true;
                }
            }
        }
        return ret;
    }

    /**
     * Gets the attribute value.
     *
     * @tparam T the data type of the attribute.
     * @param attribute The attribute to get value from.
     * @param time Current timecode.
     * @return the value of the attribute (default-constructed T if the Get fails).
     */
    template <class T>
    static T getAttribute(pxr::UsdAttribute& attribute, pxr::UsdTimeCode time)
    {
        T val;
        attribute.Get(&val, time);
        return val;
    }

    /**
     * Gets the attribute array value.
     *
     * @tparam T The data type of the attribute array.
     * @param attribute The attribute to get value from.
     * @param out the value of the attribute.
     * @param time Current timecode.
     * @return True if the out value is valid (the attribute held a non-empty array).
     */
    template <typename T>
    static bool getAttributeArray(pxr::UsdAttribute& attribute, pxr::VtArray<T>& out, pxr::UsdTimeCode time)
    {
        pxr::VtValue arrayDataValue;
        attribute.Get(&arrayDataValue, time);
        if (arrayDataValue.GetArraySize())
        {
            out = arrayDataValue.Get<pxr::VtArray<T>>();
            return true;
        }
        return false;
    }

    /**
     * enum to show effective timesamples in layerstacks based on current authoring layer
     */
    enum class TimeSamplesOnLayer
    {
        eNoTimeSamples,   // no layer in the stack holds timesamples for the attribute
        eOnCurrentLayer,  // timesamples live on the current authoring layer
        eOnStrongerLayer, // timesamples live on a layer stronger than the authoring layer
        eOnWeakerLayer    // timesamples live on a layer weaker than the authoring layer
    };

    /**
     * check if attribute has efficient timesample and
     * these data are on currentlayer/strongerlayer/weakerlayer
     *
     * @param stage Current Working Stage.
     * @param attr The attribute to check.
     * @param outLayer Optional; receives the layer that holds the timesamples, when found.
     */
    static TimeSamplesOnLayer getAttributeEffectiveTimeSampleLayerInfo(const pxr::UsdStage& stage,
                                                                       const pxr::UsdAttribute& attr,
                                                                       pxr::SdfLayerRefPtr* outLayer = nullptr)
    {
        if (attr.GetNumTimeSamples() == 0)
            return TimeSamplesOnLayer::eNoTimeSamples;
        auto authoringLayer = stage.GetEditTarget().GetLayer();
        // Layers are iterated strongest-first; once the authoring layer is passed,
        // any further hit is on a weaker layer.
        bool isOnStrongerLayer = true;
        const pxr::PcpLayerStackPtr& nodeLayers = attr.GetResolveInfo().GetNode().GetLayerStack();
        const pxr::SdfLayerRefPtrVector& layerStack = nodeLayers->GetLayers();
        for (auto layer : layerStack)
        {
            auto attrSpec = layer->GetAttributeAtPath(attr.GetPath());
            if (attrSpec && attrSpec->GetTimeSampleMap().size() > 0)
            {
                if (outLayer)
                {
                    *outLayer = layer;
                }
                if (layer == authoringLayer)
                {
                    return TimeSamplesOnLayer::eOnCurrentLayer;
                }
                else if (isOnStrongerLayer)
                {
                    return TimeSamplesOnLayer::eOnStrongerLayer;
                }
                else
                {
                    return TimeSamplesOnLayer::eOnWeakerLayer;
                }
            }
            else
            {
                if (layer == authoringLayer)
                {
                    isOnStrongerLayer = false;
                }
            }
        }
        return TimeSamplesOnLayer::eNoTimeSamples;
    }

    /**
     * Copy TimeSample From Weaker Layer.
     *
     * @param stage Current Working Stage.
     * @param attr The attribute to check.
     */
    static void copyTimeSamplesFromWeakerLayer(pxr::UsdStage& stage, const pxr::UsdAttribute& attr)
    {
        pxr::SdfLayerRefPtr outLayer;
        if (getAttributeEffectiveTimeSampleLayerInfo(stage, attr, &outLayer) == TimeSamplesOnLayer::eOnWeakerLayer)
        {
            // Re-author the weaker layer's timesamples on the current edit target so
            // subsequent edits are not masked by the weaker opinion.
            pxr::SdfTimeSampleMap timesamples;
            if (attr.GetMetadata(pxr::TfToken("timeSamples"), &timesamples))
            {
                attr.SetMetadata(pxr::TfToken("timeSamples"), timesamples);
            }
        }
    }

    /**
     * Sets attribute value.
     *
     * @tparam ValueType The data type of the attribute.
     * @param attribute The attribute to set value to.
     * @param val The value to set.
     * @param timeCode The timecode to author at (forced to Default when the attribute is not time-sampled).
     * @param skipEqualSetForTimeSample If true, skips authoring a new timesample when the current value is equal.
     * @param autoTargetSessionLayer whether the edit target should auto switch to session layer.
     * @return true if set is successful.
     */
    template <class ValueType>
    static bool setAttribute(const pxr::UsdAttribute& attribute,
                             const ValueType& val,
                             pxr::UsdTimeCode timeCode = pxr::UsdTimeCode::Default(),
                             bool skipEqualSetForTimeSample = false,
                             bool autoTargetSessionLayer = true)
    {
        PXR_NS::UsdTimeCode setTimeCode = timeCode;
        if (!isTimeSampled(attribute))
        {
            setTimeCode = PXR_NS::UsdTimeCode::Default();
        }
        // This is here to prevent the TransformGizmo from writing a translation, rotation and scale on every
        // key where it sets a value. At some point we should revisit the gizmo to simplify the logic, and
        // start setting only the transform value the user intends.
        if (skipEqualSetForTimeSample)
        {
            if (!setTimeCode.IsDefault() && !hasTimeSample(attribute, setTimeCode))
            {
                ValueType value;
                bool result = attribute.Get(&value, timeCode);
                if (result && PXR_NS::GfIsClose(value, val, 1e-6))
                {
                    return false;
                }
            }
        }
        // if the prim is defined on session layer, or the attribute itself is on session layer, switch EditTarget to
        // session layer instead
        std::unique_ptr<PXR_NS::UsdEditContext> editCtx;
        auto stage = attribute.GetStage();
        if (autoTargetSessionLayer)
        {
            // check first if the attribute is on session layer
            auto sessionOrSubLayer = getLayerIfSpecOnSessionOrItsSublayers(stage, attribute.GetPath());
            if (!sessionOrSubLayer)
            {
                // if attribute doesn't exist, fallback to prim "def" (but not "over")
                sessionOrSubLayer = getLayerIfDefOnSessionOrItsSublayers(stage, attribute.GetPrim().GetPath());
            }
            if (sessionOrSubLayer)
            {
                editCtx = std::make_unique<PXR_NS::UsdEditContext>(stage, sessionOrSubLayer);
            }
        }
        if (!setTimeCode.IsDefault())
        {
            copyTimeSamplesFromWeakerLayer(*stage, attribute);
        }
        return attribute.Set(val, setTimeCode);
    }

    /**
     * Gets the name of the stage's default prim.
     *
     * @param stage The stage to query.
     * @param defaultPrimName Receives the default prim's name when present.
     * @return true if the stage is valid and has a default prim.
     */
    static bool getDefaultPrimName(const pxr::UsdStageRefPtr stage, std::string& defaultPrimName)
    {
        if (stage && stage->HasDefaultPrim())
        {
            defaultPrimName = stage->GetDefaultPrim().GetName();
            return true;
        }
        return false;
    }

    /**
     * Looks up a stage in the UsdUtils stage cache by its integer id.
     *
     * @param stageId The cache id of the stage.
     * @return The stage, or null if not found in the cache.
     */
    static pxr::UsdStageRefPtr getUsdStageFromId(long int stageId)
    {
        return pxr::UsdUtilsStageCache::Get().Find(pxr::UsdStageCache::Id::FromLongInt(stageId));
    }

    /**
     * Checks whether two paths share the same leading drive letter or URL protocol
     * (e.g. "E:" or "omni:"), compared case-insensitively.
     */
    static bool isSameDriveOrProtocol(const std::string& path1, const std::string& path2)
    {
        auto absolutePathArray = tokenizePath(path1);
        auto anchorPathArray = tokenizePath(path2);
        if (absolutePathArray.size() > 0 && anchorPathArray.size() > 0)
        {
            // The first path token carries the drive letter or protocol prefix.
            const std::string& protocolOrDrivePath = absolutePathArray.front();
            const std::string& protocolOrDriveAnchor = anchorPathArray.front();
            return isSameDriveOrProtocolInternal(protocolOrDrivePath, protocolOrDriveAnchor);
        }
        return false;
    }

    /**
     * Finds a prim path that does not exist on the stage yet, based on the requested path.
     *
     * Sanitizes the path (no '.', no name sections starting with a digit), optionally prepends
     * the defaultPrim path, and appends/increments a "_N" suffix until the path is free.
     *
     * @param stage The stage to test paths against.
     * @param path The requested prim path.
     * @param prependDefaultPrimPath Whether to anchor the path under the stage's defaultPrim.
     * @return A prim path currently unused on the stage.
     */
    static std::string findNextNoneExisitingNodePath(pxr::UsdStageWeakPtr stage,
                                                     std::string path,
                                                     bool prependDefaultPrimPath)
    {
        uint64_t dupSuffix = 2;
        pxr::UsdPrim oldPrim;
        std::string newPrimPath;
        // Do not support dot in the path
        std::replace(path.begin(), path.end(), '.', '_');
        // Do not support Prim names begin with a number in any part of the path
        // Add a "_" in front of any section that starts with number. e.g. /1foo/2bar/baz -> /_1foor/_2bar/baz
        static const std::regex kNodeNameStartsWithNumber(
            "(\\/)([0-9])", std::regex_constants::icase | std::regex_constants::optimize);
        path = std::regex_replace(path, kNodeNameStartsWithNumber, "/_$2");
        if (prependDefaultPrimPath && stage->HasDefaultPrim())
        {
            auto defaultPrim = stage->GetDefaultPrim();
            if (defaultPrim)
            {
                path = stage->GetDefaultPrim().GetPath().GetString() + path;
            }
        }
        std::string oldPrimTestPath = path;
        // Find out if the path already has a dup suffix number
        static const std::regex kNodeNameEndsWithUnderscoreNumber(
            "(_)([0-9]+)$", std::regex_constants::icase | std::regex_constants::optimize);
        std::smatch m;
        if (std::regex_search(path, m, kNodeNameEndsWithUnderscoreNumber))
        {
            // Resume counting from the existing suffix instead of restarting at 2.
            std::string dupSuffixStr = m[m.size() - 1];
            dupSuffix = std::strtoul(dupSuffixStr.c_str(), nullptr, 0);
            path = m.prefix();
        }
        for (;;)
        {
            // If the node already exists, append a "_n" to the end
            oldPrim = stage->GetPrimAtPath(pxr::SdfPath(oldPrimTestPath));
            if (oldPrim.IsValid())
            {
                oldPrimTestPath = std::string(path) + "_" + std::to_string(dupSuffix++);
            }
            else
            {
                newPrimPath = oldPrimTestPath;
                break;
            }
        }
        return newPrimPath;
    }

    /**
     * Converts a C string array into a vector of SdfPath.
     *
     * @param stringArray Array of path strings.
     * @param count Number of entries in stringArray.
     */
    static PXR_NS::SdfPathVector fromStringArray(const char** stringArray, size_t count)
    {
        PXR_NS::SdfPathVector sdfPaths(count);
        for (size_t i = 0; i < count; i++)
        {
            sdfPaths[i] = PXR_NS::SdfPath(stringArray[i]);
        }
        return sdfPaths;
    }

    /**
     * Converts a vector of strings into a vector of SdfPath.
     */
    static PXR_NS::SdfPathVector fromStringArray(const std::vector<std::string>& stringArray)
    {
        PXR_NS::SdfPathVector sdfPaths(stringArray.size());
        for (size_t i = 0; i < stringArray.size(); i++)
        {
            sdfPaths[i] = PXR_NS::SdfPath(stringArray[i]);
        }
        return sdfPaths;
    }
static std::vector<std::string> toStringArray(const PXR_NS::SdfPathVector& paths) { std::vector<std::string> stringArray(paths.size()); for (size_t i = 0; i < paths.size(); i++) { stringArray[i] = paths[i].GetString(); } return stringArray; } /** * Finds if the given path (prim/attribute/property/object/etc) has a spec on session layer or its sublayers. * * @param stage of the prim. * @param path The path to be checked for spec. * @param predicate additional predicate to be called if spec is found on the layer. * * @return the layer that has spec at given path, or nullptr if not found. */ static PXR_NS::SdfLayerRefPtr getLayerIfSpecOnSessionOrItsSublayers( PXR_NS::UsdStageRefPtr stage, const PXR_NS::SdfPath& path, const std::function<bool(PXR_NS::SdfSpecHandle)>& predicate = nullptr) { auto sessionLayer = stage->GetSessionLayer(); auto hasSpec = [&path, &predicate](PXR_NS::SdfLayerRefPtr layer) { auto spec = layer->GetObjectAtPath(path); return spec && (!predicate || predicate(spec)); }; if (hasSpec(sessionLayer)) { return sessionLayer; } auto sublayerPaths = sessionLayer->GetSubLayerPaths(); for (auto const& path : sublayerPaths) { auto layer = PXR_NS::SdfLayer::Find(path); if (layer) { if (hasSpec(layer)) { return layer; } } } return nullptr; } /** * Finds if the given *prim* path has a "def" *primSpec* on session layer or its sublayers. * If you want to find attributeSpec use @ref getLayerIfSpecOnSessionOrItsSublayers instead. * * @stage stage of the prim. * @param path The path to be checked for "def" primSpec. * * @return the layer that has "def" prim spec, or nullptr if not found. 
*/ static PXR_NS::SdfLayerRefPtr getLayerIfDefOnSessionOrItsSublayers(PXR_NS::UsdStageRefPtr stage, const PXR_NS::SdfPath& path) { return getLayerIfSpecOnSessionOrItsSublayers( stage, path, [](PXR_NS::SdfSpecHandle spec) { auto primSpec = PXR_NS::SdfSpecStatic_cast<PXR_NS::SdfPrimSpecHandle>(spec); return primSpec && PXR_NS::SdfIsDefiningSpecifier(primSpec->GetSpecifier()); }); } private: static std::list<std::string> tokenizePath(std::string path) { std::list<std::string> result; for (;;) { size_t pos = path.find_first_of('/'); if (pos == std::string::npos) { if (!path.empty()) { result.push_back(path); } break; } else { std::string token = path.substr(0, pos); if (!token.empty()) { result.push_back(token); } path = path.substr(pos + 1); } } return result; } static bool isSameDriveOrProtocolInternal(const std::string& protocol1, const std::string& protocol2) { // If E: or omni: if (protocol1.length() && protocol2.length() && protocol1.back() == ':' && protocol2.back() == ':') { if (protocol1.length() == protocol2.length()) { #if CARB_PLATFORM_WINDOWS constexpr auto strncasecmp = _strnicmp; #endif if (strncasecmp(protocol1.c_str(), protocol2.c_str(), protocol1.length()) == 0) { return true; } } } return false; } static bool makeRelativePathTo(const std::string& absolutePath, const std::string& anchor, std::string& relativePath) { auto omniclient = carb::getCachedInterface<carb::omniclient::IOmniClient>(); if (omniclient) { std::string result; relativePath = carb::omniclient::makeRelativeUrl(omniclient, anchor.c_str(), absolutePath.c_str()); #if CARB_PLATFORM_WINDOWS carb::omniclient::OmniClientUrlPtr clientUrl(omniclient, relativePath.c_str()); if (!clientUrl->scheme || strcmp(clientUrl->scheme, "omniverse") != 0) // omniverse path can have '\' { std::replace(relativePath.begin(), relativePath.end(), '\\', '/'); } #endif return relativePath != absolutePath; } return false; } static bool checkAncestralNode(const pxr::PcpNodeRef& node) { bool isAncestral = 
node.IsDueToAncestor(); if (!isAncestral) { using namespace pxr; TF_FOR_ALL(childIt, node.GetChildrenRange()) { isAncestral |= checkAncestralNode(*childIt); if (isAncestral) { break; } } } return isAncestral; } static bool checkAncestral(const pxr::UsdPrim& prim) { return checkAncestralNode(prim.GetPrimIndex().GetRootNode()); } template <class HalfType, class FloatType, class DoubleType, class ValueType> static bool setValueWithPrecision(pxr::UsdGeomXformOp& xformOp, const ValueType& value, pxr::UsdTimeCode timeCode = pxr::UsdTimeCode::Default(), bool skipEqualSetForTimeSample = false) { switch (xformOp.GetPrecision()) { case pxr::UsdGeomXformOp::PrecisionHalf: return UsdUtils::setAttribute( xformOp.GetAttr(), HalfType(FloatType(value)), timeCode, skipEqualSetForTimeSample); case pxr::UsdGeomXformOp::PrecisionFloat: return UsdUtils::setAttribute(xformOp.GetAttr(), FloatType(value), timeCode, skipEqualSetForTimeSample); case pxr::UsdGeomXformOp::PrecisionDouble: return UsdUtils::setAttribute(xformOp.GetAttr(), DoubleType(value), timeCode, skipEqualSetForTimeSample); } return false; } }; /** * Gets the string attribute value. * * @param attribute The attribute to get value from. * @param time Current timecode. * @return the value of the attribute. */ template <> // Define it out of class body to avoid "Explicit specialization in non-namespace scope" error. inline std::string UsdUtils::getAttribute(pxr::UsdAttribute& attribute, pxr::UsdTimeCode time) { pxr::VtValue val; attribute.Get(&val, time); if (attribute.GetTypeName() == pxr::SdfValueTypeNames->String) { return val.Get<std::string>(); } else if (attribute.GetTypeName() == pxr::SdfValueTypeNames->Asset) { auto path = val.Get<pxr::SdfAssetPath>(); return path.GetAssetPath(); } return ""; } } }
omniverse-code/kit/include/omni/usd/IStageAudio.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/events/IEvents.h>
#include <carb/audio/IAudioPlayback.h>
#include <carb/audio/IAudioUtils.h>
#include <omni/usd/Api.h>

#include <limits.h>

namespace omni
{
namespace usd
{
namespace audio
{

class AudioManager;

/** base type for a streamer handle value.  This uniquely identifies a single streamer
 *  that has been created in the audio manager.  A value of this type can be
 *  @ref kInvalidStreamerId to indicate an invalid handle.
 */
using StreamerId = size_t;

/** an invalid handle value for a streamer.  This can be returned from createCaptureStreamer(). */
constexpr StreamerId kInvalidStreamerId = (size_t)(SIZE_MAX);

/** The status of a sound's asset's loading. */
enum class AssetLoadStatus
{
    eDone, /**< Asset has finished loading */
    eInProgress, /**< Asset loading is in progress */
    eFailed, /**< Asset has failed to load */
    eNotRegistered, /**< Prim was not registered from hydra yet. */
    eNoAssetPath, /**< No asset path has been set for the prim.  No load has been queued. */
};

/** The default value to be used for a setting. */
enum class FeatureDefault
{
    eOn, /**< The setting is enabled when a value is not explicitly set. */
    eOff, /**< The setting is disabled when a value is not explicitly set. */
    eForceOn, /**< The setting is always enabled.
               *   This is intended to allow users to override all sounds in
               *   a scene to test what a specific feature does. */
    eForceOff, /**< The setting is always disabled.
                *   This is intended to allow users to override all sounds in
                *   a scene to test what a specific feature does.
                */
};

/** Names for the various change notification events that can be sent on the event
 *  stream from UsdAudioContext::getMetadataChangeStream().
 */
enum class EventType
{
    /** An event type that is sent from UsdAudioContext::getMetadataChangeStream().
     *  This indicates that the audio metadata has changed for the current stage.
     *  This is also sent when a new stage is loaded.
     */
    eMetadataChange = 1,

    /** An event type that is sent from UsdAudioContext::getMetadataChangeStream().
     *  This indicates that the stage's listener state has changed and any property
     *  windows should be updated.  This change may include a new listener being
     *  added to the stage or an existing listener being removed from the stage.
     */
    eListenerListChange,

    /** An event type that is sent from UsdAudioContext::getMetadataChangeStream().
     *  This indicates that the active listener in the stage has changed and any
     *  property windows displaying it should be updated.
     */
    eActiveListenerChange,

    /** The context object has been created and is now valid.  It can be retrieved with a call to
     *  getPlaybackContext().
     */
    eContextCreated,

    /** The context object will be destroyed soon.  External callers that hold the context object
     *  should invalidate their pointer and avoid using it further.
     */
    eContextWillBeDestroyed,

    /** The context object has been destroyed.  External calls that held the context object should
     *  already no longer be using this object.  This is just a notification that the object
     *  destruction has been completed.
     */
    eContextDestroyed,
};

/** The method that will be used to calculate the length of a sound prim. */
enum class SoundLengthType
{
    /** The length of time the sound is estimated to play for in the stage once
     *  it's triggered.
     *  This will be the lesser of the difference between the sound's start and
     *  end times (if an end time is set on the prim) or the length of the
     *  actual sound itself (including `mediaOffsetStart`, `mediaOffsetEnd`
     *  and `timeScale`), multiplied by loop count.
     *  Note that timeScale is taken into account when calculating the play time
     *  of an asset.
     *  For sounds that are set to loop infinitely, this will be a very large
     *  number (on the scale of 100 days).
     */
    ePlayLength,

    /** The length of the sound.
     *  This doesn't include the sound's start time, end time or loop count.
     *  This is calculated using `mediaOffsetStart` and `mediaOffsetEnd` if those
     *  are set; otherwise, this just returns the sound asset's length.
     *  `timeScale` will also affect the length of the sound in this case.
     */
    eSoundLength,

    /** The length of the underlying sound asset, ignoring any USD parameters. */
    eAssetLength
};

/** A pure virtual class for streaming audio data from an @ref AudioManager. */
class Streamer
{
public:
    /** A callback for when a stream opens.
     * @param[inout] format The format of the stream.
     *                      This format can be edited to whatever is needed by the
     *                      callback and the audio engine will convert the data to
     *                      the desired format.
     * @returns `true` if the stream was initialized successfully.
     * @returns `false` if the stream should be abandoned.
     */
    virtual bool open(carb::audio::SoundFormat* format) noexcept = 0;

    /** A callback for when a stream closes.
     * @remarks Called when the stream is shut down.
     *          NOTE(review): presumably no further writeData() calls occur
     *          after close() returns -- confirm with the audio engine docs.
     */
    virtual void close() noexcept = 0;

    /** A callback that receives audio data.
     * @param[in] data  The buffer of audio data.
     * @param[in] bytes The number of bytes of data specified in @p data.
     *                  This buffer is only valid until the callback returns.
     * @note This callback is sent from the audio engine's thread, so this callback
     *       must do minimal work and return to avoid stalling the audio engine.
     *       Any callback that wants to do something expensive, such as rendering
     *       an image or performing a FFT should use the callback to copy data to
     *       a buffer, then process that buffer in a separate thread.
     * @note It's not possible to change the rate at which data is received.
     *       Because Kit plays audio to a physical device, that device must be
     *       allowed to control the data rate to avoid underruns/overruns.
     */
    virtual void writeData(const void* data, size_t bytes) noexcept = 0;
};

/** A descriptor for drawWaveform(). */
struct AudioImageDesc
{
    /** Flags that alter the drawing style. */
    carb::audio::AudioImageFlags flags;

    /** This specifies which audio channel from @ref sound will be rendered.
     *  This is ignored when @ref fAudioImageMultiChannel or @ref fAudioImageSplitChannels
     *  is set on @ref flags.
     */
    size_t channel;

    /** The buffer that holds the image data.
     *  The image format is RGBA8888.
     *  This must be @ref height * @ref pitch bytes long.
     *  This may not be nullptr.
     */
    void* image;

    /** The width of the image in pixels. */
    size_t width;

    /** The width of the image buffer in bytes.
     *  This can be set to 0 to use 4 * @ref width as the pitch.
     *  This may be used for applications such as writing a subimage or an
     *  image that needs some specific alignment.
     */
    size_t pitch;

    /** The height of the image in pixels. */
    size_t height;

    /** The background color to write to the image in normalized RGBA color.
     *  The alpha channel in this color is not used to blend this color with
     *  the existing data in @ref image; use @ref fAudioImageNoClear if you
     *  want to render on top of an existing image.
     *  This value is ignored when @ref fAudioImageNoClear is set on @ref flags.
     */
    carb::Float4 background;

    /** The colors to use for the image in normalized RGBA colors.
     *  If @ref carb::audio::fAudioImageMultiChannel or @ref
     *  carb::audio::fAudioImageSplitChannels are set, each element in this
     *  array maps to each channel in the output audio data; otherwise, element
     *  0 is used as the color for the single channel.
     */
    carb::Float4 colors[carb::audio::kMaxChannels];
};

/** The IStageAudio interface.
* This was previously a Carbonite interface, but it was changed to a regular * C interface due to linking issues related to the USD shared lib. * This may return to being a Carbonite interface eventually. * @{ */ /** Get the default audio manager. * @returns The default audio manager. * This is equivalent to calling UsdContext::getContext()->getAudioManager(), * except that you don't have to include UsdContext.h in your module. * At the time of writing, UsdContext.h pulls in the USD headers, which * causes conflicts with pybind11 modules. * @returns nullptr if audio is disabled. * @returns nullptr if the UsdContext couldn't be retrieved for whatever reason. */ OMNI_USD_API AudioManager* getDefaultAudioManager(); /** Retrieve the audio playback context for an audio manager. * * @param[in] mgr The audio manager to retrieve the playback context from. This may * not be `nullptr`. This can be the audio manager object returned * from getDefaultAudioManager(). * @returns The audio playback context that the requested audio manager is using. * * @remarks This retrieves the audio playback context object for the requested * audio manager object. This can be used by external callers to play * their own sounds on the same context that the main USD stage does. * * Notifications about the creation and destruction of the context object * can be received by subscribing to the event stream returned by * getMetadataChangeStream(). The interesting events in this case will be * the @a EventType::eContext* events. Note that the context is only ever * created when the given audio manager is first created. This means that * most callers would not be subscribed to the event stream by that point. * This is normally acceptable since the same context is reused from one * stage to the next. The more important notification will be the * @ref EventType::eContextWillBeDestroyed event. 
When this is received, the * context object should be considered invalidated and all future operations * on it stopped. The playback context object is typically only destroyed * during shutdown when the audio manager is unloading. */ OMNI_USD_API carb::audio::Context* getPlaybackContext(AudioManager* mgr); /** Retrieves the total number of registered sound objects in the USD stage. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * * @return the total number of sounds in the stage. This will be one larger than * the highest valid index of sound prims in the current USD stage. * * @note Sounds that have not had their asset loaded yet (or their asset failed * to load) will not show up in the sound count unless they've been passed * to an IStageAudio function. */ OMNI_USD_API size_t getSoundCount(AudioManager* mgr); /** Immediately plays the requested USD stage sound if it is loaded. * * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] path The path to sound prim to play. * @return no return value. * * @remarks This plays a single non-looping instance of a USD stage sound immediately. The * sound must have already been loaded. If the sound resource was missing or * couldn't be loaded, this call will simply be ignored. This will return * immediately after scheduling the sound to play. It will never block for the * duration of the sound playback. This sound may be prematurely stopped with * stopSound(). * * @note This operation is always thread safe. * * @note The loopCount parameter of the prim parameter is ignored in this call. * This functionality will be added in a future revision. * * @note If the sound at path @p path is playing when this is called, the * previous playing instance will continue playing. * The previously playing instance will no longer receive updates when * the USD prim at path @p path is changed. 
* The only way to stop this previously playing instance is for it to * end on its own or for the timeline to stop. * In the future, this function may stop the previously playing instance. * For cases where playing the same sound repeatedly in a fire-and-forget * fashion is desired, use spawnVoice(). * * @note The playing sound will stop when the timeline is stopped. * This behavior may change in the future. * * @note OmniSound prims that are scheduled to play in an animation should not also * be played with playSound(), since it may prevent them from playing * when they are scheduled to play. */ OMNI_USD_API void playSound(AudioManager* mgr, const char* path); /** Queries whether a sound object is currently playing. * * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] path The path to sound prim to query the playing state for. * @return true if the sound object is currently playing. * @return false if the sound has either finished playing or has not been played yet. * * @remarks This queries whether a sound is currently playing. If this fails, that may mean * that the sound ended naturally on its own or it was explicitly stopped. Note * that this may continue to return true for a short period after a sound has been * stopped with stopSound() or stopAllSounds(). This period may be up to 10 * milliseconds. * * @note This only checks the most recently playing instance of a sound, * if multiple simultaneous sounds have been spawned with playSound(). */ OMNI_USD_API bool isSoundPlaying(AudioManager* mgr, const char* path); /** Immediately schedules the stop of the playback of a sound. * * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] path The path to sound prim to stop playback for. * * @remarks This stops the playback of an active sound. 
If the sound was not playing or had * already naturally stopped on its own, this call is ignored. * * @note This only stops the most recently played instance of a sound, if * multiple overlapping instances of a sound were played with playSound(). */ OMNI_USD_API void stopSound(AudioManager* mgr, const char* path); /** Retrieves length of a sound in seconds (if known). * * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] path The path to sound prim to retrieve the length for. * @return the play length of the sound in seconds if the asset is loaded and the length * can be calculated. * @return 0.0 if the sound asset is not available yet or the length could not be properly * calculated. * * @remarks This calculates the length of a USD stage sound in seconds. This will be the * lesser of the difference between the sound's start and end times (if an end time * is set on the prim) or the length of the actual sound asset itself (if not * looping). In either case, this will be the amount of time that the sound would * be expected to play for if it were triggered. For sounds that are set to loop, * the returned time will include all scheduled loop iterations. For sounds that * are set to loop infinitely, this will be a very large number (on the scale of * 100 days). * This is equivalent to calling getSoundLengthEx() with `type` set to `ePlayLength`. */ OMNI_USD_API double getSoundLength(AudioManager* mgr, const char* path); /** Retrieves length of a sound in seconds (if known). * * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] path The path to sound prim to retrieve the length for. * @param[in] type The type of calculation to perform. * @return The play length of the sound in seconds if the asset is loaded and the length * can be calculated. 
* @return 0.0 if the sound asset is not available yet or the length could not be properly * calculated. */ OMNI_USD_API double getSoundLengthEx(AudioManager* mgr, const char* path, SoundLengthType type); /** Stops all currently playing USD stage sounds. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * * @return no return value. * * @remarks This stops all currently playing stage sounds. Any sounds that have been * queued for playback will be stopped. UI sounds will not be affected. This * is intended to be used to reset the sound playback system when an animation * sequence is stopped. This will be automatically called internally whenever * the animation sequence is stopped or it loops. */ OMNI_USD_API void stopAllSounds(AudioManager* mgr); /** Immediately plays the requested USD stage sound as a new @ref carb::audio::Voice if it is loaded. * * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] path The path to sound prim to spawn a voice from. * * @returns The new voice that was spawned. * This returns a handle, so there is no need to free the result; the * pointer can be discarded. * This voice's settings are only a snapshot of the sound prim that * they were based off. Updates to these parameters will have to be * performed on the returned voice through the IAudioPlayback interface. * @returns nullptr if a new voice could not be spawned. * * @remarks This begins playing the requested sound as a new @ref carb::audio::Voice. * The sound must have already been loaded or nullptr will be returned. * The spawned voice plays the sound asynchronously for the lifetime * of the voice. * This is intended for cases where the behavior of playSound() is too * limiting. * * @note This operation is always thread safe. * * @note stopAllSounds() and stopSound() do not affect the playing voices * spawned from this call. 
 *
 * @note Unlike playSound(), the loopCount parameter of the prim is used, so
 *       the voice must be explicitly stopped if the voice is infinitely
 *       looping.
 *
 * @note Unlike playSound(), these voice handles are managed separately from
 *       the voice handles of the timeline, so spawning a voice from a sound
 *       that will play on the timeline shouldn't affect that sound's timeline
 *       playback.
 *       Stopping the timeline will also not stop these playing voices.
 */
OMNI_USD_API carb::audio::Voice* spawnVoice(AudioManager* mgr, const char* path);

/** Queries whether the asset of an individual sound has been fully loaded.
 * @param[in] mgr  The stage audio manager instance that this function acts upon.
 *                 This must not be nullptr.
 * @param[in] path The path to sound prim to retrieve the status of.
 * @return AssetLoadStatus::eInProgress if the asset is in the process of loading.
 * @return AssetLoadStatus::eDone if the asset has finished loading and is ready
 *         for immediate playback.
 * @return AssetLoadStatus::eFailed if the asset has failed to load.
 * @return AssetLoadStatus::eFailed if the audio manager has not loaded.
 * @return AssetLoadStatus::eNotRegistered if the sound prim has not been
 *         registered with the audio manager yet.  This happens when the
 *         hydra renderer hasn't started creating the prims yet.
 */
OMNI_USD_API AssetLoadStatus getSoundAssetStatus(AudioManager* mgr, const char* path);

/** Bind a callback for when assets are loaded.
 * @param[in] mgr             The AudioManager instance that this function acts upon.
 * @param[in] path            The path to sound prim to bind a callback to.
 * @param[in] callback        The callback to fire once a load has occurred.
 *                            The parameter passed to this callback is @p callbackContext.
 * @param[in] callbackContext The context parameter to pass to @p callback.
 *
 * @returns true if the callback was bound successfully.
 * @returns true if the callback was executed immediately.
* @returns false if the prim path passed corresponds to a prim that's not * of type `OmniSound`. * @returns false if the prim path passed did not correspond to any prim. * @returns false if an unexpected error prevents the callback from occurring. * * @remarks This will fire the callback when the sound's asset is loaded or * immediately if the asset was already loaded. The callback will * only fire once. */ OMNI_USD_API bool subscribeToAssetLoad(AudioManager* mgr, const char* path, void (*callback)(void*), void* callbackContext); /** Change the active Listener prim in the scene. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] path The path to the Listener prim to set. * This can be nullptr to leave the active camera as the * active listener. * @returns true if the prim at @p path was set as the active prim. * @returns false if the prim at @p path was not registered with hydra. * This can occur if hydra has not informed the audio manager about * its existence yet. */ OMNI_USD_API bool setActiveListener(AudioManager* mgr, const char* path); /** Get the active Listener prim in the scene. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @returns The path to the active listener if one is set. * @returns nullptr if no active listener is set, which means the active camera * is being used as the listener. */ OMNI_USD_API const char* getActiveListener(AudioManager* mgr); /** Retrieves the total number of listener prims in the current stage. * * @param[in] mgr The stage audio manager instance that this function acts upon. This * must not be nullptr. * @returns The total number of listener prims in the current stage. Note that this will * reflect the total number of listener prims that have been registered with the * audio manager. 
This will not necessarily always match with the number of * listener prims that USD knows about from one moment to the next. There may be * a small delay between when a prim is added or removed and when the audio manager * is notified of that change. */ OMNI_USD_API size_t getListenerCount(AudioManager* mgr); /** Retrieves the SDF path of an indexed listener prim in the current stage. * * @param[in] mgr The stage audio manager instance that this function acts upon. This * must not be `nullptr`. * @param[in] index The zero based index of the listener prim to retrieve the SDF path for. * This should be between 0 and one less than the most recent return value * from getListenerCount(). * @param[out] sdfPath Receives the SDF path of the requested listener prim. This may not be * `nullptr`. * @param[in] maxLen The maximum number of characters including the terminating null that can * fit in @p sdfPath. * @returns `true` if the path of the requested listener prim is successfully returned. Returns * `false` otherwise. */ OMNI_USD_API bool getListenerByIndex(AudioManager* mgr, size_t index, char* sdfPath, size_t maxLen); /** Set the default value for whether doppler calculations are enabled for the current USD Stage. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] value The value to set this as. * The default value of this is @ref FeatureDefault::eOff. * This is the default because Doppler effect's implementation * is still experimental. The default will be switched to * @ref FeatureDefault::eOn when the feature is stabilized. * @remarks This will append the USD Stage metadata to add this new scene setting. */ OMNI_USD_API void setDopplerDefault(AudioManager* mgr, FeatureDefault value = FeatureDefault::eOff); /** Get the default value for whether doppler calculations are enabled for the current USD Stage. * @param[in] mgr The stage audio manager instance that this function acts upon. 
 *                  This must not be nullptr.
 * @returns The default value for whether doppler calculations are enabled for the current USD Stage.
 */
OMNI_USD_API FeatureDefault getDopplerDefault(AudioManager* mgr);

/** Set the default value for whether distance delayed audio is enabled for the current USD Stage.
 * @param[in] mgr   The stage audio manager instance that this function acts upon.
 *                  This must not be nullptr.
 * @param[in] value The value to set this as.
 *                  The default value of this is @ref FeatureDefault::eOff.
 *                  This is the default because distance delay can have a very
 *                  confusing effect if worldUnitScale hasn't been set correctly.
 * @remarks This will append the USD Stage metadata to add this new scene setting.
 */
OMNI_USD_API void setDistanceDelayDefault(AudioManager* mgr, FeatureDefault value = FeatureDefault::eOff);

/** Get the default value for whether distance delayed audio is enabled for the current USD Stage.
 * @param[in] mgr The stage audio manager instance that this function acts upon.
 *                This must not be nullptr.
 * @returns The default value for whether distance delayed audio is enabled for the current USD Stage.
 */
OMNI_USD_API FeatureDefault getDistanceDelayDefault(AudioManager* mgr);

/** Set the default value for whether interaural delay is enabled for the current USD Stage.
 * @param[in] mgr   The stage audio manager instance that this function acts upon.
 *                  This must not be nullptr.
 * @param[in] value The value to set this as.
 *                  The default value of this is @ref FeatureDefault::eOff.
 *                  This feature is currently not implemented.
 * @remarks This will append the USD Stage metadata to add this new scene setting.
 */
OMNI_USD_API void setInterauralDelayDefault(AudioManager* mgr, FeatureDefault value = FeatureDefault::eOff);

/** Get the default value for whether interaural delay is enabled for the current USD Stage.
 * @param[in] mgr The stage audio manager instance that this function acts upon.
 *                This must not be nullptr.
* @returns The speed of sound in the medium surrounding the listener. */ OMNI_USD_API double getSpeedOfSound(AudioManager* mgr); /** Sets a scaler that can exaggerate or lessen the Doppler effect. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] value The new value for the doppler scale. * * @remarks Setting this above 1.0 will exaggerate the Doppler effect. * Setting this below 1.0 will lessen the Doppler effect. * Negative values and zero are not allowed. * Doppler effects alter the pitch of a sound based on its relative * velocity to the listener. */ OMNI_USD_API void setDopplerScale(AudioManager* mgr, double value = 1.0); /** Gets a scaler for the doppler effect. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @returns The scaler for the doppler effect. */ OMNI_USD_API double getDopplerScale(AudioManager* mgr); /** Sets a Limit on the maximum Doppler pitch shift that can be applied to a playing voice. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] value The new value for the doppler limit. * * @remarks Since Omniverse Kit does not handle supersonic spatial audio, a * maximum frequency shift must be set for prims that move toward the * listener at or faster than the speed of sound. This is mostly * useful for handling edge cases such as teleporting an object far * away while it's playing a sound. */ OMNI_USD_API void setDopplerLimit(AudioManager* mgr, double value = 2.0); /** Gets a Limit on the maximum Doppler pitch shift that can be applied to a playing voice. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @returns The Limit on the maximum Doppler pitch shift that can be applied to a playing voice. 
*/ OMNI_USD_API double getDopplerLimit(AudioManager* mgr); /** This sets the timescale modifier for all spatial voices. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] value The new value for the spatial timescale. * * @remarks Each spatial OmniSound prim multiplies its timeScale attribute by this value. * For example, setting this to 0.5 will play all spatial sounds at * half speed and setting this to 2.0 will play all non-spatial * sounds at double speed. * This affects delay times for the distance delay effect. * Altering the playback speed of a sound will affect the pitch of the sound. * The limits of this setting under Omniverse Kit are [1/1024, 1024]. * This feature is intended to allow time-dilation to be performed with the * sound effects in the scene without affecting non-spatial elements like * the background music. */ OMNI_USD_API void setSpatialTimeScale(AudioManager* mgr, double value = 1.0); /** This gets the timescale modifier for all spatial voices. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @returns The timescale modifier for all spatial voices. */ OMNI_USD_API double getSpatialTimeScale(AudioManager* mgr); /** The timescale modifier for all non-spatial voices. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be ptr. * @param[in] value The new value for the non-spatial timescale. * * @remarks Each prim multiplies its timeScale attribute by this value. * For example, setting this to 0.5 will play all non-spatial sounds * at half speed and setting this to 2.0 will play all non-spatial * sounds at double speed. * Altering the playback speed of a sound will affect the pitch of the sound. * The limits of this setting under Omniverse Kit are [1/1024, 1024]. 
 */
OMNI_USD_API void setNonSpatialTimeScale(AudioManager* mgr, double value = 1.0);

/** Gets the timescale modifier for all non-spatial voices.
 * @param[in] mgr The stage audio manager instance that this function acts upon.
 *                This must not be nullptr.
 * @returns The timescale modifier for all non-spatial voices.
 */
OMNI_USD_API double getNonSpatialTimeScale(AudioManager* mgr);

/** Test whether the Hydra audio plugin is accessible.
 * @returns This returns true if the plugin could load.
 * @returns This returns false if the plugin failed to load or doesn't exist.
 * @remarks This is intended to allow the tests to check whether the Hydra audio
 *          plugin is still working.
 */
OMNI_USD_API bool testHydraPlugin();

/** Switch to use a new device for audio output.
 *
 * @param[in] mgr        The stage audio manager instance that this function acts upon.
 *                       This must not be nullptr.
 * @param[in] deviceName the name or GUID of the device to set as active.  This must
 *                       exactly match the name or GUID of one of the devices attached
 *                       to the system at the time.  If the given name or GUID doesn't
 *                       match one of the connected devices, the default device will be
 *                       used instead.  This may be set to nullptr or an empty string
 *                       to use the system's default device.
 * @returns no return value.
 *
 * @remarks This sets the device that the audio manager will use for its output.  If the
 *          requested device cannot be used for any reason, the default output device will
 *          be used instead.  The device may change the final output format.  If a streamer
 *          is attached to the previous output, its stream will be closed before opening
 *          a new stream on the new device.  Even if the new device name matches the current
 *          device's name, the device will still be changed and any stream reset.
 *
 * @note If multiple devices attached to the system have the same name, the one that is
 *       chosen may be undefined.  This can be a common issue with certain devices showing
 *       up in the system as simply "Speakers".
Using the device's GUID instead will allow * a specific device to be used instead, even its name exactly matches that of another * device. */ OMNI_USD_API void setDevice(AudioManager* mgr, const char* deviceName); /** creates a new capture streamer. * * @param[in] mgr the audio manager to create the new streamer on. This may not be nullptr. * @returns the handle to a new capture streamer if it is successfully created. When this * handle is no longer needed, it must be destroyed with destroyCaptureStreamer(). * @returns @ref kInvalidStreamerId if the new capture streamer could not be created. */ OMNI_USD_API StreamerId createCaptureStreamer(AudioManager* mgr); /** destroys a capture streamer. * * @param[in] mgr the audio manager to destroy the streamer for. This may not be nullptr. * @param[in] id the streamer to be destroyed. If this streamer is currently running a * capture, it will be stopped first. Note that currently stopping one * streamer will stop all installed streamers. All but the removed one * will be restarted afterward. This will have the side effect of * overwriting each other streamer's file though. This can be avoided * by stopping all streamers simultaneously first with stopCaptures(). * @returns no return value. */ OMNI_USD_API void destroyCaptureStreamer(AudioManager* mgr, StreamerId id); /** sets the filename that a capture streamer will write to. * * @param[in] mgr the audio manager that owns the streamer @p id. This may not be nullptr. * @param[in] id the streamer to set the filename for. This handle will have been * returned from a previous call to createCaptureStreamer(). This may * not be @ref kInvalidStreamerId. * @param[in] filename the name and path of the file to write the streamer's data to once * its capture is started. If the filename is set here, a nullptr * filename may be passed into startCapture(). * @returns `true` if the given filename is valid and writable. * @returns `false` if the streamer ID @p id is not valid. 
* @returns `false` if the given filename is not writable. * * @note A streamer can have one of: a filename, an interface or an event stream. * Attaching this filename will remove the interface set by @ref setCaptureInterface() * or the event stream set by @ref createEventStreamForCapture(). * Calling either of @ref setCaptureInterface() or @ref createEventStreamForCapture() * will remove the attached filename. * */ OMNI_USD_API bool setCaptureFilename(AudioManager* mgr, StreamerId id, const char* filename); /** Sets the streamer interface that data will be streamed to. * @param[in] mgr The audio manager that owns the streamer @p id. This may not be nullptr. * @param[in] id The streamer to set the callbacks for. This handle will have been * returned from a previous call to createCaptureStreamer(). This may * not be @ref kInvalidStreamerId. * @param[in] streamer The interface that data will be sent to. * This may not be nullptr. * The lifetime of this object is managed by the caller. * This object must remain valid until destroyCaptureStreamer() is called. * @returns `true` if the operation succeeded. * @returns `false` if an error occurred. * * @note A streamer can have one of: a filename, an interface or an event stream. * Attaching this interface will remove the filename set by @ref setCaptureFilename() * or the event stream set by @ref createEventStreamForCapture(). * Calling either of @ref setCaptureFilename() or @ref createEventStreamForCapture() * will remove the attached interface. */ OMNI_USD_API bool setCaptureInterface(AudioManager* mgr, StreamerId id, Streamer* streamer); /** Creates an event stream that the capture streamer will send data to. * @param[in] mgr The audio manager that owns the streamer @p id. This may not be nullptr. * @param[in] id The streamer to set the callbacks for. This handle will have been * returned from a previous call to createCaptureStreamer(). This may * not be @ref kInvalidStreamerId. 
* * @returns The newly created event stream that's attached to streamer @p id. * This event streamer is a @ref carb::audio::EventStreamer, so you can * use @ref carb::audio::EventListener or another interoperable * implementation to subscribe to this event stream. * `IEventStreamPtr` objects are RAII ref counted, so you discard the * object or call `release()` on it when you're done with it. * If another streamer output is set with @ref setCaptureFilename() or * @ref setCaptureInterface() after this, the returned object will * still be valid but events will no longer be pushed to it. * * @note A streamer can have one of: a filename, an interface or an event stream. * Creating this event stream will remove the filename set by @ref setCaptureFilename() * or the interface set by @ref setCaptureInterface(). * Calling either of @ref setCaptureFilename() or @ref setCaptureInterface() * will disconnect the returned event stream. */ OMNI_USD_API carb::events::IEventStreamPtr createEventStreamForCapture(AudioManager* mgr, StreamerId id); /** starts the capture on a single streamer. * * @param[in] mgr the audio manager that owns the streamer @p id. This may not be nullptr. * @param[in] id the handle of the streamer to start. This handle will have been * returned from a previous call to createCaptureStreamer(). This * may not be @ref kInvalidStreamerId. * @param[in] filename the name and path of the filename to write the streamer's data to * once its capture is started. If a filename was set with a previous * call to setCaptureFilename() on the same streamer, this may be * nullptr to use that filename. If a non-nullptr and non-empty * filename is given here, it will always override any filename * previously set on the streamer. * Set this to `nullptr` if you're using a capture callback. * @returns true if the streamer is successfully started. 
Note that starting a streamer * currently has the side effect of stopping and restarting all other streamers * that are currently running a capture. This will result in each streamer's * output file being overwritten. If multiple streamers need to be started * simultaneously, startCaptures() should be used instead. * @returns false if the streamer could not be started. */ OMNI_USD_API bool startCapture(AudioManager* mgr, StreamerId id, const char* filename = nullptr); /** starts multiple streamers simultaneously. * * @param[in] mgr the audio manager that owns the streamer handles in @p ids. This * may not be nullptr. * @param[in] ids the table of streamers to start a capture on. Any entries that * are set to @ref kInvalidStreamerId in this table will be ignored. * Each valid entry must have had its filename set with * setCaptureFilename() first otherwise it will be skipped. Any * streamer that is already running a capture will be skipped, but * a side effect of this operation will be that its stream will be * closed and reopened thereby overwriting its file. this may not * be nullptr. * @param[in] count the total number of entries in the @p streamers table to start. * Since @ref kInvalidStreamerId entries are allowed in the table, * this count must include those invalid entries. * @returns true if at least one streamer is successfully started. * @returns false if no streamers could be started or all streamers were skipped for one * of the reasons listed under @p streamers. * * @remarks This attempts to start one or more streamers simultaneously. If successful, * all streamers are guaranteed to be started in sync with each other such that * their first written audio frame matches. If this method is used to start * multiple streamers, the stopCaptures() function must also be used to stop * those same streamers simultaneously. 
If another streamer starts or stops * independently, it will cause all streamers to be closed then reopened * which will overwrite each of their files. */ OMNI_USD_API bool startCaptures(AudioManager* mgr, StreamerId* ids, size_t count); /** stops the capture on a single streamer. * * @param[in] mgr the audio manager that owns the streamer @p id. This may not be nullptr. * @param[in] id the handle to the streamer to stop. This will have been returned from * a previous call to createCaptureStreamer(). If a capture is not running * on this streamer, it will be ignored. This may not be * @ref kInvalidStreamerId. * @returns true if the streamer is successfully stopped. * @returns false if the streamer handle was invalid or a capture was not running on it. */ OMNI_USD_API bool stopCapture(AudioManager* mgr, StreamerId id); /** stops the capture on multiple streamers simultaneously. * * @param[in] mgr the audio manager that owns the streamer handles in @p ids. This * may not be nullptr. * @param[in] ids the table of streamers to stop the capture on. Any * @ref kInvalidStreamerId entries will be ignored. Each valid * entry must be currently running a capture otherwise it will be * skipped. This may not be nullptr. * @param[in] count the total number of entries in the @p streamers table to stop. * Since @ref kInvalidStreamerId entries are allowed in the table, * this count must include those invalid entries. * @returns true if at least one streamer is successfully stopped. * @returns false if no streamers could be stopped. */ OMNI_USD_API bool stopCaptures(AudioManager* mgr, StreamerId* ids, size_t count); /** Wait until the capture streamer has been disconnected. * @param[in] mgr The audio manager that owns the streamer handles in @p ids. This * may not be nullptr. * @param[in] id The handle to the streamer to wait for. This will have been returned from * a previous call to createCaptureStreamer(). If a capture is not running * on this streamer, it will be ignored. 
This may not be * @ref kInvalidStreamerId. * @param[in] timeoutMilliseconds The maximum number of milliseconds to wait for the streamer to close. * @returns `true` if the capture streamer has disconnected. * @returns `false` if the call timed out before the streamer could disconnect. * @returns `false` if @p id uses an event stream; that system is asynchronous * and may continue to send data after the streamer has disconnected. * Unsubscribe from the event stream to ensure data callbacks will no * longer be sent. You can call the `unsubscribe()` method on the * `ISubscriptionPtr` object to unsubscribe from the stream. * @remarks Because stopCapture() does not stop the audio system or otherwise block * to ensure that the streamer is disconnected, you must call waitForCapture() * to verify that a capture streamer has actually finished. * This is mainly useful if you need to verify that a file written by * a streamer has finished being written. * For a callback streamer, waitForCapture() will begin returning true * immediately before close() is called. */ OMNI_USD_API bool waitForCapture(AudioManager* mgr, StreamerId id, size_t timeoutMilliseconds); /** retrieve the event stream for metadata changes. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @returns An event stream which is pushed when metadata is changed. * @returns nullptr if the event stream could not be created for some reason. * * @remarks This generates events of type @ref kEventMetadataChange when the * audio metadata in the USD scene changes. */ OMNI_USD_API carb::events::IEventStream* getMetadataChangeStream(AudioManager* mgr); /** Draw the waveform for the sound asset of an audio prim. * @param[in] mgr The stage audio manager instance that this function acts upon. * This must not be nullptr. * @param[in] primPath The path to sound prim which has the sound asset that will * be rendered. 
* Note that the `mediaOffsetStart` and `mediaOffsetEnd` * properties of the prim are used to choose the region of * the sound that is drawn. * The asset for this prim must have been loaded or the * call will fail. * @param[in] image The description of how the image should be rendered. * If this descriptor is invalid, the call will fail. * * @returns `true` if the image was successfully drawn. * @returns `false` if @p primPath isn't a valid prim. * @returns `false` if the sound asset has not been loaded yet. * @returns `false` if @p image was invalid. * * @remarks This will draw an RGBA image of the waveform of the sound asset * in use by a `OmniSound` prim. */ OMNI_USD_API bool drawWaveform(AudioManager* mgr, const char* primPath, AudioImageDesc *image); /** @} */ } } }
omniverse-code/kit/include/omni/usd/UsdContext.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #ifndef USD_CONTEXT_INCLUDES # error "Please include UsdContextIncludes.h before including this header or in pre-compiled header." #endif #include <carb/events/EventsUtils.h> #include <omni/usd/UsdContextOverrides.h> #include <omni/usd/Api.h> #include <omni/usd/UsdTypes.h> #include <omni/usd/ViewportTypes.h> #include <omni/usd/IUsdMutex.h> #include <omni/usd-abi/IPathAbi.h> #include <memory> PXR_NAMESPACE_OPEN_SCOPE class UsdGeomBBoxCache; PXR_NAMESPACE_CLOSE_SCOPE namespace carb { namespace renderer { struct Renderer; struct Context; enum class FrameDataType; } namespace scenerenderer { struct Scene; typedef struct Scene* SceneId; enum class CameraFit; struct SceneRenderer; struct Context; } namespace settings { struct ISettings; } } namespace rtx { namespace resourcemanager { // TODO: This must stay in sync with the actual typedef of SyncScopeId typedef uint32_t SyncScopeId; } } namespace omni { namespace timeline { class Timeline; using TimelinePtr = std::shared_ptr<Timeline>; }; namespace usd { typedef std::unordered_set<PXR_NS::SdfPath, PXR_NS::SdfPath::Hash> SdfPathUSet; class Selection; struct Layers; class UsdManager; namespace hydra { class IHydraEngine; struct IHydraSceneDelegate; struct CameraSettings; struct ViewportHydraRenderResults; struct OpaqueSharedHydraEngineContext; typedef struct OpaqueSharedHydraEngineContext* OpaqueSharedHydraEngineContextPtr; struct HydraEngineContextConfig; struct EngineCreationConfig; enum class EngineCreationFlags : uint32_t; OMNI_DECLARE_INTERFACE(IViewOverrideBase); } 
namespace audio { class AudioManager; } struct HydraEngineDesc { uint32_t tickRate; const char* engineName; const char* threadName; uint32_t currentHydraEngineIdx; uint32_t deviceMask; hydra::EngineCreationFlags flags; }; /** * @brief Specifies the initial set of prims to load when opening a UsdStage. */ enum class UsdContextInitialLoadSet { eLoadAll, eLoadNone, }; class UsdContext { public: // TODO: getContext needs to be removed, but it's called 160 times across 33 files. // Keep stub that invokes UsdManager::getContext for now to reduce MR /** * Gets a UsdContext. * * @param name of the UsdContext to get. The default context is named with empty string "". */ OMNI_USD_API static UsdContext* getContext(const std::string& name = ""); /** * Adds a hydra engine with associated renderer to context. The syncScope determines * which rendering thread the hydra engine should run on. It should correspond * to the syncScope that was passed into the IHydraEngine during creation * * @return true if hydra engines are created, false otherwise */ OMNI_USD_API bool addHydraEngine(const char* name, omni::usd::hydra::IHydraEngine* hydraEngine, omni::usd::hydra::OpaqueSharedHydraEngineContextPtr ctx, const omni::usd::hydra::HydraEngineContextConfig& config, bool defer); /** * Destroys all hydra engines */ OMNI_USD_API void removeAllHydraEngines(); /** * Controls two things: * 1. Specifies the default hydra engine to use with hydraEngine = null in CreateViewport * 2. [TODO] Controls the hydra engine used for selection and picking, this feels wrong * and instead the picking api should include a hydra engine, and if none is specified * use the default. 
This API should instead be setDefaultHydraEngine * */ OMNI_USD_API void setActiveHydraEngine(const char* name); /** * Return the name of active hydra engine */ OMNI_USD_API const char* getActiveHydraEngineName(void); /** * Adds a hydra scene delegate with associated input data to the context * * @param name should be unique, and will be used for identifying the scene delegate in * future calls. If the name is not unique, the add call will fail. * @param hydraEngine a string representing which hydra engine this delegate should be * added for. For instance, "rtx" * @param sceneDelegate the new scene delegate interface to add * @param delegateCreateParam a parameter to pass to the sceneDelegate's Create function * * @return true if successfully added, false otherwise */ OMNI_USD_API bool addHydraSceneDelegate(const char* name, const char* hydraEngine, const omni::usd::hydra::IHydraSceneDelegate& sceneDelegate, const char* delegateCreateParam); /** * Removes a hydra scene delegate * * @param name should match the name passed to addHydraSceneDelegate * * @return true if successfully removed, false otherwise */ OMNI_USD_API bool removeHydraSceneDelegate(const char* name); /** * Sets a global transform for all content in a named hydra scene delegate * * @param name should match the name passed to addHydraSceneDelegate * @param xform a transform to apply to everything in the delegate * * @return true if successfully set, false otherwise */ OMNI_USD_API bool setHydraSceneDelegateRootTransform(const char* name, const PXR_NS::GfMatrix4d& xform); /** * Gets a named attribute of a prim from a named hydra scene delegate * * @param name should match the name passed to addHydraSceneDelegate * @param primPath is the path to the prim containing the attribute * @param attributeName name of the attribute * @param attributeValue a VtValue to populate with the value of the attribute * * @return true if successfully accessed the scene delegate, false otherwise */ OMNI_USD_API bool 
getHydraSceneDelegateAttribute(const char* name, const PXR_NS::SdfPath &primPath, const PXR_NS::TfToken &attribueName, PXR_NS::VtValue &attributeValue); /** * Computs a world bounding box for all content in a named hydra scene delegate * * @param name should match the name passed to addHydraSceneDelegate * @param bbox is an array of 12 elements (4 x vector3, min/max bounds, center, size) */ OMNI_USD_API void computeHydraSceneDelegateWorldBoundingBox(const char* name, double* bbox); /** * Returns true if the Fabric scene delegate is in use. This changes * behavior in Fabric population, StageReaderWriter copies to the ring * buffer, and Fabric USD notice handling * * This is the value of the "/app/useFabricSceneDelegate" carb setting * when the stage is first loaded, and remains constant until the * next stage load to avoid bad behavior with already allocated scene * delegates and hydra engines. */ OMNI_USD_API bool useFabricSceneDelegate() const; /** * Checks if any Hydra Renderer supports MDL * * @return true if a Hydra renderer supports MDL, false otherwise. */ OMNI_USD_API bool hasMDLHydraRenderer() const; /** * Returns the RTX SceneRenderer if a "rtx" hydra engine * exists, otherwise nullptr. Note, tighly coupled with getScene() * * [TODO] Remove this API * Used for PrimitiveListDrawing * Trigger "compile shaders" for F9 * MaterialWatcher for getNeurayDbScopeName() * Kit.Legacy Editor python bindings for get_current_renderer_status * */ OMNI_USD_API carb::scenerenderer::SceneRenderer* getSceneRenderer() const; /** * Returns the RTX SceneRenderer Context if a "rtx" hydra engine * exists, otherwise nullptr * */ OMNI_USD_API carb::scenerenderer::Context* getSceneRendererContext() const; /** * Update function to be called once every frame. * * @param elapsedTime Elapsed time in seconds since last update call. * @return true if omniverse has new update during this frame. */ OMNI_USD_API bool update(float elapsedTime); /** * Creates a new USD stage. 
This is an asynchronous call. * * @param fn the callback function when stage is created or fails to create. */ OMNI_USD_API bool newStage(const OnStageResultFn& resultFn); /** * Synchronous version of @see newStage(const OnStageResultFn& resultFn); */ OMNI_USD_API bool newStage(); /** * Attaches an opened USD stage. This is an asynchronous call. * * @param stageId The stage id saved into UsdUtilsStageCache. * @param fn The callback function when stage is attached successfully or fails to attach. */ OMNI_USD_API bool attachStage(long int stageId, const OnStageResultFn& resultFn); /** * Opens an existing USD stage. This is an asynchronous call. * * @param url The file path. For Omniverse file, you must connect to Omniverse first and pass the url with prefix * "omniverse:". * @param fn The callback function when stage is opened or fails to open. * @param loadSet Specifies the initial set of prims to load when opening a UsdStage. */ OMNI_USD_API bool openStage(const char* fileUrl, const OnStageResultFn& resultFn, UsdContextInitialLoadSet loadSet = UsdContextInitialLoadSet::eLoadAll); /** * Synchronous version of @see openStage(const char* fileUrl, const OnStageResultFn& resultFn); */ OMNI_USD_API bool openStage(const char* fileUrl, UsdContextInitialLoadSet loadSet = UsdContextInitialLoadSet::eLoadAll); /** * Reopens current USD stage. This is an asynchronous call. * * @param fn The callback function when stage is reopened or fails to reopen. */ OMNI_USD_API bool reopenStage(const OnStageResultFn& resultFn, UsdContextInitialLoadSet loadSet = UsdContextInitialLoadSet::eLoadAll); /** * Synchronous version of @see reopenStage(const OnStageResultFn& resultFn); */ OMNI_USD_API bool reopenStage(UsdContextInitialLoadSet loadSet = UsdContextInitialLoadSet::eLoadAll); /** * Close current USD stage. This is an asynchronous call. * * @param fn The callback function when stage is closed or fails to close. 
*/ OMNI_USD_API bool closeStage(const OnStageResultFn& resultFn); /** * Synchronous version of @see closeStage(const OnStageResultFn& resultFn); */ OMNI_USD_API bool closeStage(); /** * Saves specific layer only. This is an asynchronous call. * * @param layerIdentifier The layer to save. * @param fn The callback function when stage is saved or fails to save. * @return true if it's saved successfully. */ OMNI_USD_API bool saveLayer(const std::string& layerIdentifier, const OnLayersSavedResultFn& resultFn); /** * Synchronous version of @see saveLayer(const std::string& layerIdentifier, const OnStageResultFn& resultFn); */ OMNI_USD_API bool saveLayer(const std::string& layerIdentifier); /** * Saves specific layers only. This is an asynchronous call. * * @param newRootLayerPath If it's not empty, it means to do save root layer * to new place. This will trigger stage reload to open new root layer. * @param layerIdentifiers The layers to save. It will save all dirty changes of them. * @param fn The callback function when stage is saved or fails to save. * @return true if it's saved successfully. */ OMNI_USD_API bool saveLayers(const std::string& newRootLayerPath, const std::vector<std::string>& layerIdentifiers, const OnLayersSavedResultFn& resultFn); /** * Synchronous version of @see saveLayers */ OMNI_USD_API bool saveLayers(const std::string& newRootLayerPath, const std::vector<std::string>& layerIdentifiers); /** * Saves current USD stage. This is an asynchronous call. * * @param fn The callback function when stage is saved or fails to save. */ OMNI_USD_API bool saveStage(const OnLayersSavedResultFn& resultFn); /** * Synchronous version of @see saveStage(const OnStageResultFn& resultFn); */ OMNI_USD_API bool saveStage(); /** * Saves current USD stage to a different location. This is an asynchronous call. * * @param url The new location to save the USD stage. * @param fn The callback function when stage is saved or fails to save. 
*/ OMNI_USD_API bool saveAsStage(const char* url, const OnLayersSavedResultFn& resultFn); /** * Synchronous version of @see saveAsStage(const char* url, const OnStageResultFn& resultFn); */ OMNI_USD_API bool saveAsStage(const char* url); /** * Exports current USD stage to a different location. This is an asynchronous call. * It will composite current stage into a single flattened layer. * * @param url The new location to save the USD stage. * @param fn The callback function when stage is saved or fails to save. */ OMNI_USD_API bool exportAsStage(const char* url, const OnStageResultFn& resultFn) const; /** * Synchronous version of @see exportAsStage(const char* url, const OnStageResultFn& resultFn) const; */ OMNI_USD_API bool exportAsStage(const char* url) const; /** * Checks if currently opened stage is created by calling @ref newStage. * * @return true if current stage is a new stage. */ OMNI_USD_API bool isNewStage() const; /** * Checks if it's safe to close stage at calling time. * * @return true if it is safe to close the stage (when current stage is fully opened). * false if it's unsafe to close current stage (when current stage is still being opened or closed). */ OMNI_USD_API bool canCloseStage() const; /** * Checks if a USD stage is opened and in a savable stage (not opening/closing in progress). * * @return true if the stage is opened and savable, false is no stage is opened or opened stage is not savable. */ OMNI_USD_API bool canSaveStage() const; /** * Checks if it's safe to open a different stage at calling time. * * @return true if it is safe to open a different stage (when current stage is fully opened or closed). * false if it's unsafe to open different stage (when current stage is still being opened or closed). */ OMNI_USD_API bool canOpenStage() const; /** * Checks if there is enough permissions to save the stage at calling time. * * @return true if it is possible to save the stage. * false if it's not possible to save current stage. 
*/ OMNI_USD_API bool isWritable() const; /** * Gets the state of current stage. * * @return state of current stage. */ OMNI_USD_API omni::usd::StageState getStageState() const; /** * Gets UsdStage. * * @return UsdStageWeakPtr of current stage. */ OMNI_USD_API PXR_NS::UsdStageWeakPtr getStage() const; /** * Gets UsdStage id. * * @return id of current stage. */ OMNI_USD_API long int getStageId() const; /** * Gets the url of current stage. * * @return url of current stage. */ OMNI_USD_API const std::string& getStageUrl() const; /** * Checks if current stage is dirty. * * @return true if current stage is dirty. */ OMNI_USD_API bool hasPendingEdit() const; /** * Marks the edits state of current opened stage. It means changes are * pending to be saved if you set it to true, or false otherwise. This will * disgard the real state of opened stage. For example, if the opened stage * has real changes to be saved, hasPendingEdit() will still return false if * you set it to false. * * @param edit true to set pending edits state, false to unset pending edits state */ OMNI_USD_API void setPendingEdit(bool edit); /** * Gets the carb::events::IEventStreamPtr for StageEvent. * * @return the carb::events::IEventStream for StageEvent. */ OMNI_USD_API carb::events::IEventStreamPtr getStageEventStream(); /** * Gets the carb::events::IEventStreamPtr for RenderingEvent. * * @return the carb::events::IEventStream for RenderingEvent. */ OMNI_USD_API carb::events::IEventStreamPtr getRenderingEventStream(); /** * Sends a StageEvent to UsdStageEvent stream. * It chooses to send event sync/async based on syncUsdLoads option. * * @param event The event to be sent. * @param blockingAsync If the event is sent as async, true to wait on the event until it has been processed by all * subscribers. false to return immediately. */ OMNI_USD_API void sendEvent(carb::events::IEventPtr& event, bool blockingAsync = false); /** * Gets if the current stage is opened from Omniverse. 
* * @return true if the stage is opened from Omniverse. */ OMNI_USD_API bool isOmniStage() const; /** * Saves render settings to current opened USD stage. */ OMNI_USD_API void saveRenderSettingsToCurrentStage(); /** * Loads render settings from stage. * * @param stageId The stage id saved into UsdUtilsStageCache. */ OMNI_USD_API void loadRenderSettingsFromStage(long int stageId); /** * Gets the picked position in world space since last picking request. * * @param outWorldPos The picked world space position. * @return true if the result is valid, false if result is not valid. */ OMNI_USD_API bool getPickedWorldPos(ViewportHandle handle, carb::Double3& outWorldPos); /** * Gets the picked geometry hit data - position in world space and normal since last picking request. * * @param outWorldPos The picked world space position. * @param outNormal The picked normal. * @return true if the result is valid, false if result is not valid. */ OMNI_USD_API bool getPickedGeometryHit(ViewportHandle handle, carb::Double3& outWorldPos, carb::Float3& outNormal); /** * Gets the AABB of a prim. * * @param prim The prim to get AABB from. * @return The AABB of the prim. */ OMNI_USD_API PXR_NS::GfRange3d computePrimWorldBoundingBox(const pxr::UsdPrim& prim); /** * Gets the AABB of a path as carb::Doubles3 (primarily for Python). * * @param path The path to get AABB from. * @param aabbMin Where to store the min-extents. * @param aabbMax Where to store the max-extents. */ OMNI_USD_API void computePathWorldBoundingBox(const std::string& path, carb::Double3& aabbMin, carb::Double3& aabbMax); /** * Gets the GfMatrix4 of a prim as stored in the current bbox cache. * * @param prim The prim to get the transform of. * @return The world-space GfMatrix4 of the prim. */ OMNI_USD_API PXR_NS::GfMatrix4d computePrimWorldTransform(const pxr::UsdPrim& prim); /** * Gets the GfMatrix4 of a path as stored in the current bbox cache. * * @param path The path to a prim to get the transform of. 
* @return The world-space GfMatrix4 of the prim at the path. */ OMNI_USD_API void computePrimWorldTransform(const std::string& path, std::array<double, 16>& flattened); /** * Gets the stage loading status. * * @param message The message of current stage loading. * @param filesLoaded Number of files already loaded. * @param totalFiles Total number of files to be loaded. */ OMNI_USD_API void getStageLoadingStatus(std::string& message, int32_t& filesLoaded, int32_t& totalFiles); /** * Returns all renderable paths for given prim path and its descendants. * Renderable paths are prims of type instancer or geometry. * * @param primPath Prim path * @param renderablePathSet An unordered output path set. */ OMNI_USD_API void getRenderablePaths(PXR_NS::SdfPath primPath, SdfPathUSet& unOrderedRenderablePathSet); /** * Returns all renderable paths for given prim path and its descendants. * Renderable paths are prims of type instancer or geometry. * * @param primPath Prim path * @param renderablePathSet An ordered output path set. */ template <typename T> OMNI_USD_API void getRenderablePaths(PXR_NS::SdfPath primPath, T& renderablePathSet); /** * Sets the mouse pickable state of a prim. * * @param primPath The path of the prim to set pickable state. * @param isPickable true if to set the prim to be pickable, false to set to unpickable. */ OMNI_USD_API void setPickable(const char* primPath, bool isPickable); /** * Returns the SceneId from the RTX SceneRenderer if a "rtx" hydra engine * exists. Note, usage coupled with getSceneRenderer() * * @return carb::scenerenderer::SceneId of current scene. */ OMNI_USD_API carb::scenerenderer::SceneId getRendererScene(); /** * Stops all async hydra rendering. Waits for completion of in-flight tasks * then requests RunLoops for rendering to exit */ OMNI_USD_API void stopAsyncRendering(); /** * Currently writing to usd during async rendering can cause a data race. * This necessitates the presence of a certain mutex to protect usd data access. 
* This call controls the usage of this mutex and will block until it is safe to begin writes. * * If a usd write occurs while this is disabled, undefined behavior is likely. * * Can only be called from the main thread for safety reasons. * * @note This function is deprecated. * * @param enabled if true enables the usage of a mutex to protect usd write operations. */ OMNI_USD_API void enableUsdWrites(bool enabled); /** * USD is a global shared resource. Invoking enableUsdLocking() * lets UsdContext manage the locking of this resource. * * enableUsdLocking will lock() the resource and manage locking/unlocking * going forward * * disableUsdLocking() will unlock() the resource and make no subsequent * lock/unlock calls * * WARNING: enableUsdLocking and disableUsdLocking will only work when called from the main thread. */ OMNI_USD_API void enableUsdLocking(); OMNI_USD_API void disableUsdLocking(); OMNI_USD_API bool isUsdLockingEnabled(); /** * Returns the status of usd write back. If not true, writing to usd * may cause undefined behavior. * * @note This function is deprecated. * * @return true if write back is enabled and false otherwise. */ OMNI_USD_API bool usdWritesEnabled(); /** * Creates a new ViewportHandle for the hydraEngine at the specified tick rate * A tickrate of -1 means "render as fast as possible" * * Will not create a new HydraEngine, a HydraEngine must be available. HydraEngines * are added by addHydraEngine() * * @return ViewportHandle that's used in addRender(), getRenderResults(), destroyViewport() */ OMNI_USD_API ViewportHandle createViewport(const char* hydraEngine, uint32_t tickrate, omni::usd::hydra::EngineCreationFlags hydraEngineFlags); /** * Destroys the ViewportHandle that was created in createViewport * then requests RunLoops for rendering to exit */ OMNI_USD_API void destroyViewport(ViewportHandle viewportHandle); /** * Returns the latest available rendering result. 
It is assumed main/simulation thread runs * at the same rate or faster than hydra engine tick rate. If you query the FPS of the * returned value, it will match the hydra engine render rate * * bBlock == true will trigger hydraRendering() + checkForHydraResults() outside * of defined update order in omni::kit::update::UpdateOrderings *if necessary* * * @return ViewportHydraRenderResults the latest available hydra render result */ OMNI_USD_API const omni::usd::hydra::ViewportHydraRenderResults* getRenderResults(ViewportHandle viewportHandle, bool bBlock = false); /** * The ViewportHandle and the RenderProduct USD Prim Path to render * */ OMNI_USD_API void addRender(ViewportHandle handle, omni::usd::PathH renderProductPrimPath, const Picking* picking = nullptr); /** * Gets Selection instance. */ OMNI_USD_API Selection* getSelection(); /** * Gets const Selection instance. */ OMNI_USD_API const Selection* getSelection() const; /** * Retrieves the stage audio manager for use in the IStageAudio interface. * @returns The USD context's stage audio manager instance. * This is valid until the USD context is destroyed. * @returns nullptr if the stage audio manager was not loaded or failed * to load. */ OMNI_USD_API audio::AudioManager* getAudioManager() const; OMNI_USD_API void enableSaveToRecentFiles(); OMNI_USD_API void disableSaveToRecentFiles(); /** * By default UsdContext subscribes to IApp for updates. That can be disabled. 
Temp function until we refactor * Editor.cpp */ OMNI_USD_API void toggleAutoUpdate(bool enabled); /** * Query for instance IDs that are returned to synthetic data */ OMNI_USD_API size_t GetGeometryInstanceId(const PXR_NS::SdfPath& path, uint32_t* instanceList); /** * Query for geometry IDs that are returned to synthetic data */ OMNI_USD_API size_t GetGeometryId(const PXR_NS::SdfPath& path, uint32_t* geometryList); /** * Query for mesh instance path that is returned to synthetic data */ OMNI_USD_API const std::string& GetGeometryInstancePath(uint32_t instanceId); /** * Query for mesh geometry path that is returned to synthetic data */ OMNI_USD_API const std::string& GetGeometryPath(uint32_t geometryId); /** * Adds path to material/shader loading queue * * @param path the path of UsdShadeShader to create MDL materials. * @param recreate If to force recreating the inputs if it's already populated * @param loadInputs If to create MDL inputs as UsdAttribute on the UsdShadeShader prim. * @return true if scheduled successfully, false if inputs are already created and @ref recreate is false. */ OMNI_USD_API bool addToPendingCreatingMdlPaths(const std::string& path, bool recreate = false, bool loadInputs = true); /** * Get unique gizmo id. * When it not needed freeGizmoUID should be called so that it can be reused. * If it is not called this method return the same uid on next call with given path. * @param path Prim path that will be used for prim selection when gizmo uid is picked. */ OMNI_USD_API uint32_t getGizmoUID(const PXR_NS::SdfPath& path); /** * Allows to notify that gizmo uid is no longer used. * @param uid Gizmo unique id. */ OMNI_USD_API void freeGizmoUID(uint32_t uid); /** * Returns prim path for given gizmo uid. * @param uid Gizmo unique id. * @return A pointer to path for given uid or nullptr for invalid uid. */ OMNI_USD_API const PXR_NS::SdfPath* getPathForGizmoUID(uint32_t uid); /** * Register selection group. 
* Note: Supposed that this function called once at app initialization. */ OMNI_USD_API uint8_t registerSelectionGroup(); /** * Setup selection group outline color. */ OMNI_USD_API void setSelectionGroupOutlineColor(uint8_t groupId, const carb::Float4& color); /** * Setup selection group shade color. */ OMNI_USD_API void setSelectionGroupShadeColor(uint8_t groupId, const carb::Float4& color); /** * Set selection group for specified primitives path. */ OMNI_USD_API void setSelectionGroup(uint8_t groupId, const std::string& path); /** * Query for camera settings for the camera prim */ OMNI_USD_API omni::usd::hydra::CameraSettings getCameraSettings(const PXR_NS::SdfPath& path, double aspectRatio); /** * Set the aperture fit policy */ OMNI_USD_API void setCameraWindowPolicy(carb::scenerenderer::CameraFit policy); /** * Returns the current frame data based on the requested feature, such as profiler data. * If frame is not finished processing the data, the result of the previous frame is returned. * For multi-GPU, query the device count and then set deviceIndex to the desired device index. * * @param dataType Specified the requested data type to return. * @param deviceIndex The index of GPU device to get the frame data from. Set to zero in Single-GPU mode. * You may query the number of devices with FrameDataType::eGpuProfilerDeviceCount. * deviceIndex is ignored when the type is set to eGpuProfilerDeviceCount. * @param data A pointer to the returned data. Returns nullptr for failures or eGpuProfilerDeviceCount. * You may pass nullptr if you only need dataSize. * @param dataSize The size of the returned data in bytes, the number of structures, or device count based on * the dataType. For strings, it includes the null-termination. * @param engineIndex The index of an attached HydraEngine or -1 for the -active- HydraEngine. 
*/ OMNI_USD_API void getFrameData(carb::renderer::FrameDataType dataType, uint32_t deviceIndex, void** data, size_t* dataSize, uint32_t engineIndex = -1); // Exposed to Python (see viewport based api getHydraEngineDesc() below) OMNI_USD_API size_t getAttachedHydraEngineCount() const; OMNI_USD_API const char* getAttachedHydraEngineName(size_t hydraEngineIdx) const; // Returns the HydraEngineDesc describing the hydra engine being used by this viewport OMNI_USD_API HydraEngineDesc getHydraEngineDesc(ViewportHandle handle) const; /** * Allow for viewoverrides to be added through the usd context */ OMNI_USD_API void registerViewOverrideToHydraEngines(omni::usd::hydra::IViewOverrideBase* viewOverride); /** * Allow for viewoverrides to be removed through the usd context */ OMNI_USD_API void unregisterViewOverrideToHydraEngines(omni::usd::hydra::IViewOverrideBase* viewOverride); /** * Allow scheduling override to be set for usd context */ OMNI_USD_API void setUsdContextSchedulingOverride(IUsdContextSchedulingOverride* schedulingOverride); /** * Reset scheduling override */ OMNI_USD_API void resetUsdContextSchedulingOverride(); /** * Retrieves the mutex for `*this` */ OMNI_USD_API IUsdMutex& getMutex(); /** * Stop any picking in flight for a specific View */ OMNI_USD_API void stopAllPickingForView(ViewPickingId pickingId); /** * Sets the timeline. This must be called before the context is used, right after its creation. 
*/
OMNI_USD_API void setTimeline(const std::string& name = "");

/**
 * Retrieves the name of the timeline
 */
OMNI_USD_API std::string getTimelineName() const;

/**
 * Retrieves the timeline
 */
OMNI_USD_API timeline::TimelinePtr getTimeline() const;

/**
 * Retrieves the name of the context
 */
OMNI_USD_API std::string getName() const;

/**
 * Trigger creation of the runloop associated with the given hydraengine configuration
 * (otherwise it will only be created the first time an associated viewport is rendered)
 */
int32_t getOrCreateRunloopThread(const char* name, const hydra::EngineCreationConfig& engineConfig, bool setWarmup);

/**
 * Extract the engine warmup config from the settings
 */
typedef std::unordered_map<uint32_t, hydra::EngineCreationConfig> EngineWarmupConfig;
static bool getEngineWarmupConfig(const carb::settings::ISettings& settings,
                                  const char* hydraEngineName,
                                  EngineWarmupConfig& config);

/**
 * Return the default config for hydraengine creation for this engine type, and index
 */
static hydra::EngineCreationConfig getDefaultEngineCreationConfig(const carb::settings::ISettings& settings,
                                                                  const char* engineName,
                                                                  uint32_t engineIndex);

/*
 * Opens an existing USD stage with specified session layer. This is an asynchronous call.
 * This function can be used to speed up the stage composition to avoid re-composing stage
 * caused by inserting a sublayer under session layer after stage opened.
 *
 * @param fileUrl The file path. For Omniverse file, you must connect to Omniverse first and pass the url with prefix
 * "omniverse:".
 * @param sessionLayerUrl The specified session layer to use. If it's empty or not provided, it will work
 * as the same as openStage. If it's provided but cannot be opened, it will return false.
 * @param resultFn The callback function when stage is opened or fails to open.
 * @param loadSet Specifies the initial set of prims to load when opening a UsdStage.
 */
OMNI_USD_API bool openStageWithSessionLayer(const char* fileUrl,
                                            const char* sessionLayerUrl,
                                            const OnStageResultFn& resultFn,
                                            UsdContextInitialLoadSet loadSet = UsdContextInitialLoadSet::eLoadAll);

/*
 * Tries to cancel save. It only takes effect when it's called after receiving event StageEventType::eSaving or
 * StageEventType::eSettingsSaving.
 */
OMNI_USD_API void tryCancelSave();

private:
/**
 * Constructor.
 *
 * @param name The name of the context.
 */
UsdContext(const std::string& name);

/**
 * No copy.
 */
UsdContext(const UsdContext&) = delete;

/**
 * No assign.
 */
UsdContext& operator=(const UsdContext&) = delete;

/**
 * Destructor.
 */
~UsdContext();

friend UsdManager;

struct Impl;
std::unique_ptr<Impl> m_impl;
};
}
}
omniverse-code/kit/include/omni/usd/Api.h
// Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #if defined _WIN32 # ifdef OMNI_USD_EXPORTS # define OMNI_USD_API __declspec(dllexport) # else # define OMNI_USD_API __declspec(dllimport) # endif #else # ifdef OMNI_USD_EXPORTS # define OMNI_USD_API __attribute__((visibility("default"))) # else # define OMNI_USD_API # endif #endif
omniverse-code/kit/include/omni/usd/UsdManager.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/IObject.h>
#include <omni/kit/KitTypes.h>
#include <omni/usd/Api.h>

#include <memory.h> // NOTE(review): likely intended <memory> for std::unique_ptr — confirm

namespace carb
{
namespace settings
{
struct ISettings;
}
}
namespace gpu
{
namespace foundation
{
struct GpuFoundation;
}
}
namespace carb
{
namespace renderer
{
struct Context;
struct Renderer;
}
}

namespace omni
{
namespace usd
{
namespace hydra
{
class IHydraEngineFactory;
enum class EngineCreationFlags : uint32_t;
using IHydraEngineFactoryPtr = carb::ObjectPtr<IHydraEngineFactory>;
struct OpaqueSharedHydraEngineContext;
typedef struct OpaqueSharedHydraEngineContext* OpaqueSharedHydraEngineContextPtr;

/**
 * Configuration of a hydra engine
 * - UsdManager::getOrCreateHydraEngine will return the same engine if the configuration parameters match
 */
struct EngineCreationConfig
{
    EngineCreationFlags flags;
    uint32_t tickRateInHz = uint32_t(-1);
};

// Strict weak ordering so EngineCreationConfig can be used as an associative-container key;
// compares tick rate first, then flags.
inline bool operator<(const omni::usd::hydra::EngineCreationConfig& a, const omni::usd::hydra::EngineCreationConfig& b)
{
    if (a.tickRateInHz == b.tickRateInHz)
    {
        return a.flags < b.flags;
    }
    return (a.tickRateInHz < b.tickRateInHz);
}
}

class UsdContext;

class UsdManager
{
public:
    /**
     * Creates a UsdContext.
     *
     * @param name Name of the UsdContext to be created. The default context is named with empty string "".
     * @return Created UsdContext, or existing one if a context with same name is already created.
     */
    OMNI_USD_API static UsdContext* createContext(const std::string& name = "");

    /**
     * Destroys a UsdContext.
     *
     * @param name Name of the UsdContext to be destroyed. The default context is named with empty string "".
     * @return true if destroyed successfully, false if failed.
     */
    OMNI_USD_API static bool destroyContext(const std::string& name = "");

    /**
     * Gets a UsdContext.
     *
     * @param name Name of the UsdContext to get. The default context is named with empty string "".
     */
    OMNI_USD_API static UsdContext* getContext(const std::string& name = "");

    /*
     * Used by IHydraEngines at load/unload to register their factory class
     */
    OMNI_USD_API static void registerHydraEngineFactory(const char* name, omni::usd::hydra::IHydraEngineFactoryPtr factory);
    OMNI_USD_API static void unregisterHydraEngineFactory(const char* name);

    /*
     * Creates a new hydra engine instance using the registered factory and attaches
     * it to the UsdContext. The hydra engine will have its own unique syncScope
     * and run in a separate rendering thread if asyncRendering is enabled
     */
    OMNI_USD_API static void addHydraEngine(const char* name, UsdContext* context);

    /**
     * Adds all loaded hydra engines to the USDContext. Used at startup
     *
     * All loaded hydra engines will share a single syncScope and run in a single rendering
     * thread when asyncRendering is enabled.
     *
     * Return type, first is the name of the engine, second is an opaque pointer to engine data
     *
     * Until legacy carb settings /renderer/active and /renderer/enabled are no longer used to
     * set the default hydra engine or override what extensions are "enabled", we need this function
     * to support legacy STARTUP behavior and it should be executed once AFTER all the extensions have loaded
     */
    OMNI_USD_API static std::vector<std::pair<std::string, hydra::OpaqueSharedHydraEngineContextPtr>> attachAllHydraEngines(
        UsdContext* context);

    /*
     * Advances all the hydra engines sync scopes rtx::kMaxFramesInFlight to trigger
     * deferred releases of all objects. Typically used to allow fully unloading
     * the USD Mesh data before loading another USD Stage
     */
    OMNI_USD_API static void advanceAllSyncScopes();

    /**
     * releaseAllHydraEngines() is designed to support the legacy Kit 1.0 Editor
     *
     * Unregisters all HydraEngineFactories and destroys any created OpaqueSharedHydraEngineContextPtr.
     *
     * @param context Optional, if valid, context releases all hydra engines prior to destroying any
     * OpaqueSharedHydraEngineContextPtr
     */
    OMNI_USD_API static void releaseAllHydraEngines(UsdContext* context = nullptr);

    /*
     * setSettingsPlugin() + setFoundationPlugins() are required initialization
     * methods of USDManager
     */
    OMNI_USD_API static void setSettingsPlugin(carb::settings::ISettings* settings);
    OMNI_USD_API static void setFoundationPlugins(gpu::foundation::GpuFoundation* gpuFoundation, uint32_t syncScope);

    // Until Kit redoes their implementation of GpuFoundations integration, let's provide a global
    // getter for those that need GpuFoundations to be unblocked
    OMNI_USD_API static void getFoundationPlugins(gpu::foundation::GpuFoundation** outGpuFoundation);

    /**
     * @brief Called from omni.usd extension shutdown
     */
    OMNI_USD_API static void shutdownUsd();

    /**
     * Gets corresponding UsdContext of specified stage id.
     */
    OMNI_USD_API static UsdContext* getContextFromStageId(long int stageId);

    /*
     * Obtains a hydraengine instance with a matching config to the one passed in (If none exist yet, will create a new one).
     * A new hydra engine will have its own unique syncScope and run in a separate rendering thread if asyncRendering is enabled
     */
    OMNI_USD_API static hydra::OpaqueSharedHydraEngineContextPtr getOrCreateHydraEngine(
        const char* name, UsdContext* context, const hydra::EngineCreationConfig& config);

private:
    /**
     * Constructor.
     */
    UsdManager();

    /**
     * No copy.
     */
    UsdManager(const UsdManager&) = delete;

    /**
     * No assign.
     */
    UsdManager& operator=(const UsdManager&) = delete;

    /**
     * Destructor.
     */
    ~UsdManager();

    static UsdManager* instance();

    struct Impl;
    std::unique_ptr<Impl> m_impl;
};
}
}
omniverse-code/kit/include/omni/usd/UsdContextIncludes.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // Define this macro so that other headers that are supposed to have this header included before them can check against // it. #define USD_CONTEXT_INCLUDES // Include cstdio here so that vsnprintf is properly declared. This is necessary because pyerrors.h has // #define vsnprintf _vsnprintf which later causes <cstdio> to declare std::_vsnprintf instead of the correct and proper // std::vsnprintf. By doing it here before everything else, we avoid this nonsense. #include <cstdio> // Python must be included first because it monkeys with macros that cause // TBB to fail to compile in debug mode if TBB is included before Python #include <boost/python/object.hpp> #include <pxr/usd/usdGeom/bboxCache.h> #include <pxr/usd/usdLux/cylinderLight.h> #include <pxr/usd/usdLux/diskLight.h> #include <pxr/usd/usdLux/distantLight.h> #include <pxr/usd/usdLux/domeLight.h> #include <pxr/usd/usdLux/rectLight.h> #include <pxr/usd/usdLux/sphereLight.h> #include <pxr/usd/usdShade/material.h> #include <pxr/usd/usdShade/shader.h>
omniverse-code/kit/include/omni/usd/PathUtils.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/Framework.h> #include <carb/extras/Path.h> #include <random> #include <string> #if CARB_PLATFORM_WINDOWS # define strncasecmp(x, y, z) _strnicmp(x, y, z) #endif namespace omni { namespace usd { constexpr const char* const kWritableUsdFileExts = "usd|usda|usdc|live"; class PathUtils { public: /** * Checks if path is omniverse path (prefixed with omni:). * * @param path Path string. * @return true if it's omniverse path. */ static bool isOmniPath(const std::string& path) { return path.length() > 10 && strncasecmp(path.c_str(), "omniverse:", 10) == 0; } /** * Appends a random number to path. This is used to construct unique path. * * @param path Path string. * @return unique path string. */ static std::string appendRandomNumberToFilename(const std::string& path) { std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<uint64_t> dis(0, std::numeric_limits<uint64_t>::max()); carb::extras::Path carbPath(path); return carbPath.getParent() / carbPath.getStem() + std::to_string(dis(gen)) + carbPath.getExtension(); } }; } }
omniverse-code/kit/include/omni/usd/Selection.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#ifndef USD_CONTEXT_INCLUDES
#    error "Please include UsdContextIncludes.h before including this header or in pre-compiled header."
#endif

#include <omni/kit/KitTypes.h>
#include <omni/usd/Api.h>

#include <memory.h> // NOTE(review): likely intended <memory> for std::unique_ptr — confirm
#include <mutex>

namespace omni
{
namespace usd
{
class UsdContext;

// Pairs of (prim path, selection group id) used by the custom-selection API below.
typedef std::vector<std::pair<PXR_NS::SdfPath, uint8_t>> SdfPathGroupIdVector;

class Selection
{
public:
    Selection(UsdContext* usdContext);
    ~Selection();
    Selection(const Selection&) = delete;
    Selection& operator=(const Selection&) = delete;

    /**
     * Sets all selected prim paths. This will replace existing selected paths with given ones.
     *
     * @param paths The vector of selected prim paths to be set.
     * @param expandInStage true to expand the path in Stage Window on selection.
     * @return true if the selected paths are changed.
     */
    OMNI_USD_API bool setSelectedPrimPaths(const std::vector<std::string>& paths, bool expandInStage);

    /**
     * Gets all selected prim paths.
     *
     * @return a vector containing all selected prim paths.
     */
    OMNI_USD_API std::vector<std::string> getSelectedPrimPaths();

    /**
     * Clears all selected prim paths.
     *
     * @return true if the selected paths are changed.
     */
    OMNI_USD_API bool clearSelectedPrimPaths();

    /**
     * Sets the selection state of a single prim path.
     *
     * @param path The prim path to select or deselect.
     * @param selected true to select the path, false to deselect it.
     * @param forcePrim presumably forces prim-mode selection as in add()/remove() forcePrimMode — confirm semantics.
     * @param clearSelected true to clear the existing selection before applying this change.
     * @param expandInStage true to expand the path in Stage Window on selection.
     * @return true if the selected paths are changed.
     */
    OMNI_USD_API bool setPrimPathSelected(
        const char* path, bool selected, bool forcePrim, bool clearSelected, bool expandInStage);

    /**
     * Gets if a prim path is selected.
     *
     * @param path The prim path to query.
     * @return true if prim path is selected.
     */
    OMNI_USD_API bool isPrimPathSelected(const char* path) const;

    /**
     * Something has changed (layers may have been added, removed, or muted), where we can no longer
     * guarantee the selection paths are valid, so mark them as dirty.
     */
    OMNI_USD_API void setDirty();

    OMNI_USD_API bool add(PXR_NS::SdfPath path, bool forcePrimMode, bool clearSelected, bool expandInStage);
    OMNI_USD_API bool remove(PXR_NS::SdfPath path, bool forcePrimMode);
    OMNI_USD_API bool removePathAndDescendents(PXR_NS::SdfPath path);

    OMNI_USD_API PXR_NS::SdfPathVector& getRenderablePaths();
    OMNI_USD_API PXR_NS::SdfPathVector& getAddedRenderablePaths();
    OMNI_USD_API PXR_NS::SdfPathVector& getRemovedRenderablePaths();
    OMNI_USD_API const PXR_NS::SdfPathVector& getModelPaths();
    OMNI_USD_API const PXR_NS::SdfPathVector& getCameraPaths();
    OMNI_USD_API const std::vector<PXR_NS::UsdLuxDistantLight>& getDistantLights();
    OMNI_USD_API const std::vector<PXR_NS::UsdLuxRectLight>& getRectLights();
    OMNI_USD_API const std::vector<PXR_NS::UsdLuxSphereLight>& getSphereLights();
    OMNI_USD_API const std::vector<PXR_NS::UsdLuxCylinderLight>& getCylinderLights();
    OMNI_USD_API const std::vector<PXR_NS::UsdLuxDiskLight>& getDiskLights();
    OMNI_USD_API const std::vector<PXR_NS::UsdPrim>& getSounds();
    OMNI_USD_API const std::vector<PXR_NS::UsdPrim>& getListeners();
    OMNI_USD_API const std::vector<PXR_NS::UsdShadeMaterial>& getMaterials();
    OMNI_USD_API const std::vector<PXR_NS::UsdShadeShader>& getShaders();

    OMNI_USD_API PXR_NS::SdfPath getQueriedPath() const;
    OMNI_USD_API void setQueriedPath(PXR_NS::SdfPath path, bool addOutline = false);

    OMNI_USD_API void getPrimBySelectionMode(PXR_NS::UsdPrim& prim) const;

    OMNI_USD_API std::recursive_mutex& getMutex();

    OMNI_USD_API void update();

    OMNI_USD_API bool setSelectedPrimPathsV2(const PXR_NS::SdfPathVector& paths);
    OMNI_USD_API PXR_NS::SdfPathVector getSelectedPrimPathsV2() const;

    OMNI_USD_API bool empty() const;

    OMNI_USD_API void selectAllPrims(const std::vector<std::string>& typeNames);
    OMNI_USD_API void selectInvertedPrims();

    /*
     * Custom selection with groupId.
     * ibychkov: I added new methods for that to be sure there is no regression for regular selection.
     * But it might be considered just add groupId support for regular selection.
     */
    OMNI_USD_API SdfPathGroupIdVector& getCustomRenderablePaths();
    OMNI_USD_API SdfPathGroupIdVector& getAddedCustomRenderablePaths();
    OMNI_USD_API PXR_NS::SdfPathVector& getRemovedCustomRenderablePaths();
    OMNI_USD_API void setCustomSelection(const PXR_NS::SdfPathVector& paths, uint8_t groupId);
    OMNI_USD_API void clearCustomSelection();

private:
    struct Impl;
    std::unique_ptr<Impl> m_impl;
};
}
}
omniverse-code/kit/include/omni/usd/UsdContextOverrides.h
// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/core/Api.h>
#include <omni/core/IObject.h>
#include <omni/core/OmniAttr.h>

namespace omni
{
namespace usd
{

OMNI_DECLARE_INTERFACE(IUsdContextSchedulingOverride);

// ABI interface that lets an external component hook into the UsdContext render-scheduling loop.
class IUsdContextSchedulingOverride_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.usd.IUsdContextSchedulingOverride")>
{
protected:
    /**
     * Function to run before scheduling the next round of renders
     *
     * @param elapsedTime time since the call was called last time
     * @param asyncRendering whether async rendering was enabled
     */
    virtual void preScheduleRender_abi(float elapsedTime, bool asyncRendering) noexcept = 0;

    /**
     * Function to run after scheduling and before usd lock is grabbed again.
     */
    virtual void postRenderScheduledGate_abi() noexcept = 0;

    /**
     * Function to run after scheduling when main thread has usd lock again.
     */
    virtual void postRenderUsdLockAcquired_abi() noexcept = 0;
};
}
}

// Generated public wrapper that forwards each call to the corresponding _abi method.
template <>
class omni::core::Generated<omni::usd::IUsdContextSchedulingOverride_abi>
    : public omni::usd::IUsdContextSchedulingOverride_abi
{
public:
    /**
     * Function to run before scheduling the next round of renders
     *
     * @param elapsedTime time since the call was called last time
     * @param asyncRendering whether async rendering was enabled
     */
    inline void preScheduleRender(float elapsedTime, bool asyncRendering)
    {
        preScheduleRender_abi(elapsedTime, asyncRendering);
    }

    /**
     * Function to run after scheduling and before usd lock is grabbed again.
     */
    inline void postRenderScheduledGate()
    {
        postRenderScheduledGate_abi();
    }

    /**
     * Function to run after scheduling when main thread has usd lock again.
     */
    inline void postRenderUsdLockAcquired()
    {
        postRenderUsdLockAcquired_abi();
    }
};

namespace omni
{
namespace usd
{
using UsdContextSchedulingOverridePtr = omni::core::ObjectPtr<omni::usd::IUsdContextSchedulingOverride>;
}
}
omniverse-code/kit/include/omni/usd/LayerUtils.h
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#ifndef USD_UTILS_INCLUDES
#    error "Please include UtilsIncludes.h before including this header or in pre-compiled header."
#endif

#include "PathUtils.h"

#include <carb/extras/Path.h>
#include <carb/filesystem/IFileSystem.h>
#include <carb/profiler/Profile.h>
#include <carb/tokens/TokensUtils.h>
#include <carb/profiler/Profile.h> // NOTE(review): duplicate include; harmless due to include guards

#include <omni/kit/AssetUtils.h>
#include <omni/kit/KitUtils.h>
#include <omni/usd/UsdUtils.h>

#include <pxr/base/tf/pathUtils.h>
#include <pxr/pxr.h>
#include <pxr/usd/ar/resolver.h>
#include <pxr/usd/ar/resolverScopedCache.h>
#include <pxr/usd/sdf/assetPath.h>
#include <pxr/usd/sdf/copyUtils.h>
#include <pxr/usd/sdf/layerUtils.h>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usdUtils/flattenLayerStack.h>

#include <random>

namespace omni
{
namespace usd
{

/**
 * USD does not provide the way to save muteness. Kit will save those inside the custom data of root layer
 * of the stage.
 */
static const std::string kLayerMuteCustomKey = "omni_layer:muteness";
static const std::string kLayerNameCustomKey = "omni_layer:custom_name";
static const std::string kLayerLockedCustomKey = "omni_layer:locked";
static constexpr size_t kLayerIndexNone = SIZE_MAX;

class LayerUtils
{
public:
    /**
     * Gets the global muteness. Global muteness is the one that's saved inside the custom data of stage's root layer.
     * @param rootLayer Root layer to get muteness from.
     * @param layerIdentifier Layer identifier.
     * @return True if it muted, or false otherwise.
     */
    static bool getLayerGlobalMuteness(PXR_NS::SdfLayerRefPtr rootLayer, const std::string& layerIdentifier)
    {
        bool muted = false;
        getLayerCustomFieldInRootLayer<bool>(rootLayer, layerIdentifier, kLayerMuteCustomKey, muted);
        return muted;
    }

    /**
     * Set global muteness for layer.
     * @param rootLayer Root layer to save muteness to.
     * @param layerIdentifier Layer identifier.
     * @param muted Mute or not.
     */
    static void setLayerGlobalMuteness(PXR_NS::SdfLayerRefPtr rootLayer, const std::string& layerIdentifier, bool muted)
    {
        setLayerCustomFieldInRootLayer(rootLayer, layerIdentifier, kLayerMuteCustomKey, muted);
    }

    /**
     * Clear all muteness from stage.
     */
    static void clearLayerMutenessFromCustomFields(PXR_NS::SdfLayerRefPtr rootLayer)
    {
        auto rootLayerCustomData = rootLayer->GetCustomLayerData();
        rootLayerCustomData.EraseValueAtPath(kLayerMuteCustomKey);
        rootLayer->SetCustomLayerData(rootLayerCustomData);
    }

    /**
     * Set layer's muteness based on global muteness saved in custom fields from stage.
*/ static void setLayerMuteStateFromCustomFields(PXR_NS::UsdStageRefPtr stage) { auto rootLayer = stage->GetRootLayer(); auto rootLayerCustomData = rootLayer->GetCustomLayerData(); const PXR_NS::VtValue* muteness = rootLayerCustomData.GetValueAtPath(kLayerMuteCustomKey); PXR_NS::VtDictionary mutenessDict; if (muteness && !muteness->IsEmpty()) { mutenessDict = muteness->Get<PXR_NS::VtDictionary>(); } std::vector<std::string> mutedLayers; std::vector<std::string> unmutedLayers; for (const auto& identifierMutenessPair : mutenessDict) { const std::string& layerIdentifier = rootLayer->ComputeAbsolutePath(identifierMutenessPair.first); bool muted = identifierMutenessPair.second.Get<bool>(); if (muted != stage->IsLayerMuted(layerIdentifier)) { if (muted) { mutedLayers.push_back(layerIdentifier); } else { unmutedLayers.push_back(layerIdentifier); } } } if (mutedLayers.size() > 0 || unmutedLayers.size() > 0) { stage->MuteAndUnmuteLayers(mutedLayers, unmutedLayers); } } // Checks if the layer can be saved to disk. static bool isLayerSavable(PXR_NS::UsdStageRefPtr stage, const std::string& layerIdentifier) { bool anonymous = PXR_NS::SdfLayer::IsAnonymousLayerIdentifier(layerIdentifier); bool locked = LayerUtils::isLayerLocked(stage->GetRootLayer(), layerIdentifier); bool muted = stage->IsLayerMuted(layerIdentifier); bool writable = omni::kit::isWritableUrl(layerIdentifier.c_str());; if (!anonymous && !locked && !muted && writable) { return true; } return false; } // Gets the customized layer lock status in root layer. static bool isLayerLocked(PXR_NS::SdfLayerRefPtr rootLayer, const std::string& layerIdentifier) { // Layer lock is a concept extended from Kit that does not use real ACL // from disk to control the lock of layer but only an instruction to // save that this layer is locked and should not be touched. 
bool locked = false; getLayerCustomFieldInRootLayer<bool>(rootLayer, layerIdentifier, kLayerLockedCustomKey, locked); return locked; } static void setLayerLockStatus(PXR_NS::SdfLayerRefPtr rootLayer, const std::string& layerIdentifier, bool locked) { setLayerCustomFieldInRootLayer(rootLayer, layerIdentifier, kLayerLockedCustomKey, locked); } static std::string getLayerName(const std::string& identifier, bool includeExtension = true) { if (PXR_NS::SdfLayer::IsAnonymousLayerIdentifier(identifier)) { return identifier; } carb::extras::Path path; auto omniclient = carb::getCachedInterface<carb::omniclient::IOmniClient>(); if (omniclient) { auto url = omniclient->breakUrl(identifier.c_str()); if (url && url->path) { path = carb::extras::Path(url->path); } else { path = carb::extras::Path(identifier); } } else { path = carb::extras::Path(identifier); } if (includeExtension) { return path.getFilename().getString(); } else { return path.getStem().getString(); } } static std::string getCustomLayerName(PXR_NS::SdfLayerRefPtr layer) { PXR_NS::VtDictionary valueMap; const PXR_NS::VtDictionary& layerCustomData = layer->GetCustomLayerData(); const auto& customDataValue = layerCustomData.GetValueAtPath(kLayerNameCustomKey); if (customDataValue && !customDataValue->IsEmpty()) { auto value = customDataValue->Get<PXR_NS::TfToken>(); return value.GetString(); } else { return LayerUtils::getLayerName(layer->GetIdentifier()); } } /** * Select a existing layer as edit target. * * @param stage The stage of the operation. * @param layerIdentifier Layer identifier. * @return true if the layer is selected, false otherwise. 
 *
 **/
    static bool setAuthoringLayer(PXR_NS::UsdStageRefPtr stage, const std::string& layerIdentifier)
    {
        // A muted layer cannot be the edit target.
        if (stage->IsLayerMuted(layerIdentifier))
        {
            return false;
        }

        const auto& sublayer = PXR_NS::SdfLayer::FindOrOpen(layerIdentifier);
        if (!sublayer)
        {
            return false;
        }

        PXR_NS::UsdEditTarget editTarget(sublayer);
        stage->SetEditTarget(editTarget);
        return true;
    }

    /**
     * Gets the current authoring layer (edit target).
     *
     * @param stage The stage of the operation.
     * @return layer identifier of the authoring layer, or an empty string if the
     *         edit target has no layer.
     **/
    static std::string getAuthoringLayerIdentifier(PXR_NS::UsdStageRefPtr stage)
    {
        if (stage->GetEditTarget().GetLayer())
        {
            return stage->GetEditTarget().GetLayer()->GetIdentifier();
        }

        return "";
    }

    /**
     * Gets the layer identifier of sublayer at specific position.
     *
     * @param hostLayer Layer handle to query.
     * @param position Sublayer position. It should not be over the count of total sublayers.
     * @param sublayerIdentifier Returned identifier (absolute path resolved against hostLayer).
     * @return false if position is over the count of total sublayers, or true otherwise.
     */
    static bool getSublayerIdentifier(const PXR_NS::SdfLayerRefPtr& hostLayer, size_t position, std::string& sublayerIdentifier)
    {
        if (position >= hostLayer->GetNumSubLayerPaths())
        {
            return false;
        }

        const auto& sublayerPaths = hostLayer->GetSubLayerPaths();
        const std::string& sublayer = sublayerPaths[position];
        sublayerIdentifier = computeAbsolutePath(hostLayer, sublayer);
        return true;
    }

    /**
     * Gets sublayer position of specific layer in parent layer.
     *
     * @param hostLayer Parent layer to search.
     * @param layerIdentifier Layer identifier.
     * @return Sublayer position if it's found, or kLayerIndexNone if it's not found.
*/ static size_t getSublayerPositionInHost(const PXR_NS::SdfLayerRefPtr& hostLayer, const std::string& layerIdentifier) { const auto& sublayerPaths = hostLayer->GetSubLayerPaths(); for (size_t i = 0; i < sublayerPaths.size(); i++) { const auto& absolutePath = computeAbsolutePath(hostLayer, sublayerPaths[i]); if (normalizeUrl(absolutePath) == normalizeUrl(layerIdentifier)) { return i; } } return kLayerIndexNone; } /** * Gets the sublayer handle at specific position. * * @param hostLayer Layer handle. * @param position Sublayer position. * @return layer handle if position is valid, otherwise, nullptr is returned. */ static PXR_NS::SdfLayerRefPtr getSublayer(const PXR_NS::SdfLayerRefPtr& hostLayer, size_t position) { if (position >= hostLayer->GetNumSubLayerPaths()) { return nullptr; } const auto& sublayerPaths = hostLayer->GetSubLayerPaths(); const std::string& path = sublayerPaths[position]; const auto& absolutePath = hostLayer->ComputeAbsolutePath(path); return findOrOpen(absolutePath); }; /** * Adds a new layer. * * @param stage The stage this sublayer will be inserted to. * @param hostLayer Host Layer to create sublayer. * @param position The position to insert the new layer before. If position > sublayerCount, * it will create the layer at the end. * @param anonymous If the layer should be anonymous. Anonymous layer is in memory only and will not be saved to * file. * @param saveOnCreate Saves layer file after create or not. * @param finalPosition Real sublayer position of new layer in hostLayer. It's valid only when the return is not * nullptr. * @return layer handle. It will be nullptr if it's failed. 
 */
    static PXR_NS::SdfLayerRefPtr createSublayer(PXR_NS::UsdStageRefPtr stage,
                                                 PXR_NS::SdfLayerRefPtr hostLayer,
                                                 size_t position,
                                                 const char* layerPath,
                                                 bool saveOnCreate,
                                                 size_t& finalPosition)
    {
        // Clamp the requested position to the current sublayer count.
        size_t newLayerPos;
        size_t numLayers = hostLayer->GetNumSubLayerPaths();
        if (position > numLayers)
        {
            newLayerPos = numLayers;
        }
        else
        {
            newLayerPos = position;
        }

        // It's possible that this layer is already existed
        PXR_NS::SdfLayerRefPtr newLayer = nullptr;
        if (layerPath && layerPath[0] != '\0')
        {
            newLayer = PXR_NS::SdfLayer::FindOrOpen(layerPath);
            if (!newLayer)
            {
                if (saveOnCreate)
                {
                    // Creates the layer file on disk immediately.
                    newLayer = PXR_NS::SdfLayer::CreateNew(layerPath);
                }
                else
                {
                    // In-memory layer bound to layerPath; written only when saved later.
                    newLayer = PXR_NS::SdfLayer::New(hostLayer->GetFileFormat(), layerPath);
                }
            }
            else
            {
                // NOTE(review): reusing an existing layer wipes its content — callers
                // should be aware this is destructive to whatever was at layerPath.
                newLayer->Clear();
            }
        }
        else
        {
            // No path given: create an anonymous in-memory layer.
            newLayer = PXR_NS::SdfLayer::CreateAnonymous();
        }

        if (newLayer)
        {
            std::string relativePath = newLayer->GetIdentifier();
            UsdUtils::makePathRelativeToLayer(hostLayer, relativePath);
            hostLayer->InsertSubLayerPath(relativePath, (int)newLayerPos);
            finalPosition = newLayerPos;
        }

        return newLayer;
    }

    /**
     * Inserts a layer into the current sublayers.
     *
     * @param stage The stage this sublayer will be inserted to.
     * @param hostLayer The Layer to create sublayer.
     * @param position The position to insert the new layer before. If sublayerPosition > sublayerCount,
     *        it will create the layer at the end.
     * @param path Path of the layer to insert (absolute or relative to hostLayer).
     * @param newLayerIndex Real sublayer position this layer creates to. It's valid only when return is not nullptr.
     * @return The inserted layer handle; nullptr when the layer at path could not be opened.
*/ static PXR_NS::SdfLayerRefPtr insertSublayer(PXR_NS::UsdStageRefPtr stage, PXR_NS::SdfLayerRefPtr hostLayer, size_t position, const std::string& path, size_t& newLayerIndex) { size_t newLayerPos; size_t numLayers = hostLayer->GetNumSubLayerPaths(); if (position > numLayers) { newLayerPos = numLayers; } else { newLayerPos = position; } const auto& absolutePath = computeAbsolutePath(hostLayer, path); const PXR_NS::SdfLayerRefPtr& newLayer = PXR_NS::SdfLayer::FindOrOpen(absolutePath); if (newLayer) { std::string relativePath = absolutePath; UsdUtils::makePathRelativeToLayer(hostLayer, relativePath); hostLayer->InsertSubLayerPath(relativePath, (int)newLayerPos); } else { CARB_LOG_ERROR("ERROR! Failed to insert sublayer at path %s", absolutePath.c_str()); } newLayerIndex = newLayerPos; return newLayer; } /** * Replaces layer with new path. * * @param stage The stage this sublayer will be inserted to. * @param hostLayer The layer handle to create sublayer. * @param position The position to replace. It must be [0, num_of_sublayers). * @param path New layer path. * @return layer handle. It will be nullptr if it's failed. **/ static PXR_NS::SdfLayerRefPtr replaceSublayer(PXR_NS::UsdStageRefPtr stage, PXR_NS::SdfLayerRefPtr hostLayer, size_t position, const std::string& path) { if (position >= hostLayer->GetNumSubLayerPaths()) { CARB_LOG_ERROR("ERROR! 
Failed to replace sublayer as position %zu is invalid", position); return nullptr; } const auto& absolutePath = computeAbsolutePath(hostLayer, path); const PXR_NS::SdfLayerRefPtr& newLayer = PXR_NS::SdfLayer::FindOrOpen(absolutePath); if (newLayer) { PXR_NS::SdfChangeBlock changeBlock; auto sublayerPaths = hostLayer->GetSubLayerPaths(); std::string oldSublayerPath = sublayerPaths[position]; oldSublayerPath = computeAbsolutePath(hostLayer, oldSublayerPath); std::string relativePath = absolutePath; UsdUtils::makePathRelativeToLayer(hostLayer, relativePath); sublayerPaths[position] = relativePath; } else { CARB_LOG_ERROR("ERROR! Failed to replace sublayer at path %s", absolutePath.c_str()); } return newLayer; } /** * Delete a existing layer. Delete root layer will do nothing. * * @param stage The stage of the operation. * @param hostLayer The layer to create sublayer. * @param position The sublayer position to delete. If it's not a valid sublayer index, it will do nothing. * @return true if the layer is deleted, false otherwise. * **/ static bool deleteSublayer(PXR_NS::SdfLayerRefPtr hostLayer, size_t position) { std::string sublayerIdentifier; if (!getSublayerIdentifier(hostLayer, position, sublayerIdentifier)) { return false; } hostLayer->RemoveSubLayerPath((int)position); return true; } /** * Move sublayer from source to target position. * * @param fromLayerIdentifier Layer identifier of source layer. * @param fromSublayerIndex The sublayer position of source layer to move. * @param toLayerIdentifier Layer identifier of target layer. * @param toSublayerIndex The sublayer position of target layer that source sublayer moves to. * @return true if the layer is moved successfully, false otherwise. 
 */
    static bool moveSublayer(const std::string& fromLayerIdentifier,
                             size_t fromSublayerIndex,
                             const std::string& toLayerIdentifier,
                             size_t toSublayerIndex,
                             size_t& toFinalPosition)
    {
        // Moving a sublayer onto itself is a no-op (reported as failure).
        if (fromLayerIdentifier == toLayerIdentifier && fromSublayerIndex == toSublayerIndex)
        {
            return false;
        }

        auto fromLayer = PXR_NS::SdfLayer::FindOrOpen(fromLayerIdentifier);
        auto toLayer = PXR_NS::SdfLayer::FindOrOpen(toLayerIdentifier);
        if (!fromLayer || !toLayer)
        {
            return false;
        }

        if (fromSublayerIndex >= fromLayer->GetNumSubLayerPaths())
        {
            return false;
        }

        // Clamp the target index. For a cross-layer move, the upper bound is the
        // target's current count; for a same-layer move the removal below shrinks
        // the list first, so the bound is count - 1.
        if (fromLayerIdentifier != toLayerIdentifier && toSublayerIndex > toLayer->GetNumSubLayerPaths())
        {
            toFinalPosition = toLayer->GetNumSubLayerPaths();
        }
        else if (toSublayerIndex > toLayer->GetNumSubLayerPaths())
        {
            toFinalPosition = toLayer->GetNumSubLayerPaths() - 1;
        }
        else
        {
            toFinalPosition = toSublayerIndex;
        }

        // Re-anchor the path relative to the destination layer before inserting.
        const auto& sublayerPaths = fromLayer->GetSubLayerPaths();
        std::string sublayer = sublayerPaths[fromSublayerIndex];
        sublayer = computeAbsolutePath(fromLayer, sublayer);
        UsdUtils::makePathRelativeToLayer(toLayer, sublayer);
        // NOTE(review): for a same-layer move with fromSublayerIndex < toSublayerIndex,
        // the removal shifts later indices down by one before the insert — verify the
        // resulting slot matches the caller's expectation.
        fromLayer->RemoveSubLayerPath((int)fromSublayerIndex);
        toLayer->InsertSubLayerPath(sublayer, (int)toFinalPosition);
        return true;
    }

    /**
     * Save all changes of the specified layers.
     * @param layerIdentifiers List of layer identifiers to be saved.
     * @return true only if every listed layer saved successfully.
     */
    static bool saveLayers(const std::vector<std::string>& layerIdentifiers)
    {
        bool success = true;
        for (const auto& layerIdentifier : layerIdentifiers)
        {
            // Keep going after a failure so the remaining layers still get saved.
            if (!LayerUtils::saveLayer(layerIdentifier))
            {
                success = false;
                CARB_LOG_ERROR("ERROR! Failed to save layer %s due to permission issue.", layerIdentifier.c_str());
            }
        }

        return success;
    }

    /**
     * Gets all identifiers of all dirty layers in the local stack of stage (anonymous layers are not included).
*/ static std::vector<std::string> getLocalDirtyLayers(PXR_NS::UsdStageRefPtr stage) { std::vector<std::string> layerIdentifiers; PXR_NS::SdfLayerHandleVector layerStack = stage->GetLayerStack(true); for (const auto& layer : layerStack) { if (layer && !layer->IsAnonymous() && layer->IsDirty()) { layerIdentifiers.push_back(layer->GetIdentifier()); } } return layerIdentifiers; } /** * Saves layer and all its sublayers. * * @param layerIdentifier Layer identifier. * @return true if it's successful, or false otherwise. */ static bool saveLayer(const std::string& layerIdentifier, bool saveSublayers = false) { auto layer = PXR_NS::SdfLayer::Find(layerIdentifier); if (!layer) { return false; } if (saveSublayers) { auto stage = PXR_NS::UsdStage::Open(layer); stage->Save(); return true; } else { return layer->Save(); } } /** * Checks if a layer is empty (no root prims) or not. * * @param layerIdentifier Layer identifier. * @return true if it includes root prims, or false otherwise. */ static bool hasRootPrimSpecs(const std::string& layerIdentifier) { auto layer = PXR_NS::SdfLayer::FindOrOpen(layerIdentifier); if (!layer) { return false; } return layer->GetPseudoRoot()->GetNameChildren().size() > 0; } struct PairHash { template <class T1, class T2> std::size_t operator()(const std::pair<T1, T2>& pair) const { return std::hash<T1>()(pair.first) ^ std::hash<T2>()(pair.second); } }; // Depth-first pre-order traverse of layer and its sublayer descendants. // LayerGroupStartCallback will be called before iterating sublayers of a layer. // If LayerGroupStartCallback returns false, it will stop to traverse this layer. // For each layer it found, it will call LayerCallback. // After all sublayers have been traversed, it will call LayerGroupEndCallback. // You need to track the depth of whole traverse. // If layer is not found, LayerGroupStartCallback and LayerGroupEndCallback will be called // also by passing a nullptr as the layer handle, and its layer identifier. 
// REMINDER: Don't call this per frame, which may be harmful to performance as // it insolves with resolver. using LayerGroupStartCallback = std::function<bool(PXR_NS::SdfLayerRefPtr hostLayer, const std::string& layerIdentifier)>; using LayerCallback = std::function<void( PXR_NS::SdfLayerRefPtr hostLayer, PXR_NS::SdfLayerRefPtr sublayer, const std::string& layerIdentifier, size_t sublayerIndex)>; using LayerGroupEndCallback = std::function<void(PXR_NS::SdfLayerRefPtr hostLayer, const std::string& layerIdentifier)>; static void iterateSublayerTreeDFS(PXR_NS::SdfLayerRefPtr rootLayer, const LayerGroupStartCallback groupStartCallback, const LayerCallback callback, const LayerGroupEndCallback groupEndCallback) { CARB_PROFILE_ZONE(1, "iterateSublayerTreeDFS"); PXR_NS::ArResolverScopedCache cache; // If there is circular reference like layer1 -> sublayer1 -> layer1 -> sublayer2 ..., // it needs to be detected to avoid endless loop. // This map is used to record if <parent layer, current layer> has been accessed already // to avoid endless loop. std::unordered_set<std::pair<std::string, std::string>, PairHash> accessMap; iterateSublayerTreeDFSInternal(nullptr, rootLayer, rootLayer->GetIdentifier(), kLayerIndexNone, groupStartCallback, callback, groupEndCallback, accessMap); } // Depth-first pre-order traverse of prim tree. // PrimSpecGroupStartCallback will be called before iterating children of a prim. // It will stop to traverse it's children if it returns false. // For each prim it found, it will call PrimSpecCallback. // After all children haved been traversed, it will call PrimSpecGroupEndCallback. // You need to track the depth of whole traverse. 
    using PrimSpecGroupStartCallback = std::function<bool(PXR_NS::SdfPrimSpecHandle parentSpecPrim)>;
    using PrimSpecCallback = std::function<void(
        PXR_NS::SdfPrimSpecHandle parentSpecPrim, PXR_NS::SdfPrimSpecHandle childSpecPrim, size_t primSpecIndex)>;
    using PrimSpecGroupEndCallback = std::function<void(PXR_NS::SdfPrimSpecHandle parentSpecPrim)>;
    static void iteratePrimSpecTreeDFS(PXR_NS::SdfLayerRefPtr layer,
                                       const PrimSpecGroupStartCallback groupStartCallback,
                                       const PrimSpecCallback callback,
                                       const PrimSpecGroupEndCallback groupEndCallback)
    {
        CARB_PROFILE_ZONE(1, "iteratePrimSpecTreeDFS");

        // Start from each root prim under the pseudo-root; the pseudo-root itself
        // is passed to callbacks as an empty handle.
        PXR_NS::SdfPrimSpecHandle empty;
        auto children = layer->GetPseudoRoot()->GetNameChildren();
        for (size_t i = 0; i < children.size(); i++)
        {
            iteratePrimTreeDFSInternal(empty, children[i], 0, groupStartCallback, callback, groupEndCallback);
        }
    }

    // Convenience overload that opens (or finds) the layer by identifier first.
    static void iteratePrimSpecTreeDFS(const std::string& layerIdentifier,
                                       const PrimSpecGroupStartCallback groupStartCallback,
                                       const PrimSpecCallback callback,
                                       const PrimSpecGroupEndCallback groupEndCallback)
    {
        auto layer = PXR_NS::SdfLayer::FindOrOpen(layerIdentifier);
        if (!layer)
        {
            return;
        }

        iteratePrimSpecTreeDFS(layer, groupStartCallback, callback, groupEndCallback);
    }

    /**
     * Gets the absolute path that's relative to root layer.
     *
     * @param rootLayer Root layer that the path is relative to.
     * @param path Path string.
     * @return Absolute path (normalized). If it's anonymous layer path, or the root
     *         layer itself is anonymous, the path is returned directly.
     */
    static std::string computeAbsolutePath(const PXR_NS::SdfLayerRefPtr& rootLayer, const std::string& path)
    {
        if (PXR_NS::SdfLayer::IsAnonymousLayerIdentifier(path) || rootLayer->IsAnonymous())
        {
            return path;
        }
        else
        {
            // Compute the path through the resolver
            const std::string& absolutePath = rootLayer->ComputeAbsolutePath(path);
            return normalizePath(absolutePath);
        }
    }

    /**
     * Similar to SdfLayer::FindOrOpen, but only calls SdfLayer::Find on anonymous layer to prevent USD Coding Error.
     *
     * @param identifier Layer identifier to be found or opened.
* @return Found layer, or nullptr if not found. */ static PXR_NS::SdfLayerRefPtr findOrOpen(const std::string& identifier) { if (PXR_NS::SdfLayer::IsAnonymousLayerIdentifier(identifier)) { return PXR_NS::SdfLayer::Find(identifier); } return PXR_NS::SdfLayer::FindOrOpen(identifier); } static bool hasDirtyLayers(PXR_NS::UsdStageRefPtr stage) { CARB_PROFILE_ZONE(1, "hasDirtyLayers"); if (!stage) { return false; } for (auto& layer : stage->GetUsedLayers()) { if (!layer->IsAnonymous() && layer->IsDirty()) { return true; } } return false; } // Checks if layerIdentifier is in the sublayer tree rooted from hostLayer, and this layer // must be existed. static bool isLayerInSublayerTree(PXR_NS::SdfLayerRefPtr hostLayer, const std::string& identifier) { bool found = false; LayerUtils::iterateSublayerTreeDFS(hostLayer, [&identifier, &found](const pxr::SdfLayerRefPtr layer, const std::string& layerIdentifier) { if (layer && identifier == layerIdentifier) { found = true; return false; } return true; }, nullptr, nullptr ); return found; } static std::string normalizePath(const std::string& path) { static auto replaceAll = [](std::string str, const std::string& from, const std::string& to) { size_t start_pos = 0; while ((start_pos = str.find(from, start_pos)) != std::string::npos) { str.replace(start_pos, from.length(), to); start_pos += to.length(); // Handles case where 'to' is a substring of 'from' } return str; }; std::string finalPath = path; // FIXME: Need a better way to normalize path. 
finalPath = replaceAll(finalPath, "%3C", "<"); finalPath = replaceAll(finalPath, "%3E", ">"); finalPath = replaceAll(finalPath, "%20", " "); finalPath = replaceAll(finalPath, "%5C", "/"); std::replace(finalPath.begin(), finalPath.end(), '\\', '/'); return finalPath; } static std::string normalizeUrl(const std::string& url) { std::string result; auto omniclient = carb::getCachedInterface<carb::omniclient::IOmniClient>(); if (omniclient) { size_t bufferSize = 0; omniclient->normalizeUrl(url.c_str(), nullptr, &bufferSize); if (bufferSize != 0) { auto stringBufferHeap = std::unique_ptr<char[]>(new char[bufferSize]); const char* normalizedUrl = omniclient->normalizeUrl(url.c_str(), stringBufferHeap.get(), &bufferSize); if (!normalizedUrl) { result = url; } else { result = normalizedUrl; } } else { result = url; } } else { result = url; } return result; } private: template<typename T> static bool getLayerCustomFieldInRootLayer(PXR_NS::SdfLayerRefPtr rootLayer, const std::string& layerIdentifier, const std::string& key, T& value) { // By default, the layer is not muted. 
PXR_NS::VtDictionary valueMap; const PXR_NS::VtDictionary& rootLayerCustomData = rootLayer->GetCustomLayerData(); const auto& customDataValue = rootLayerCustomData.GetValueAtPath(key); if (customDataValue && !customDataValue->IsEmpty()) { valueMap = customDataValue->Get<PXR_NS::VtDictionary>(); } auto omniclient = carb::getCachedInterface<carb::omniclient::IOmniClient>(); for (const auto& valuePair : valueMap) { std::string absolutePath = rootLayer->ComputeAbsolutePath(valuePair.first); if (normalizeUrl(absolutePath) == normalizeUrl(layerIdentifier)) { value = valuePair.second.Get<T>(); return true; } } return false; } template<typename T> static bool setLayerCustomFieldInRootLayer(PXR_NS::SdfLayerRefPtr rootLayer, const std::string& layerIdentifier, const std::string& key, const T& value) { PXR_NS::VtDictionary valueMap; PXR_NS::VtDictionary rootLayerCustomData = rootLayer->GetCustomLayerData(); const auto& oldValue = rootLayerCustomData.GetValueAtPath(key); if (oldValue && !oldValue->IsEmpty()) { valueMap = oldValue->Get<PXR_NS::VtDictionary>(); } std::string relativePath = layerIdentifier; UsdUtils::makePathRelativeToLayer(rootLayer, relativePath); LayerUtils::normalizePath(relativePath); for (auto iter = valueMap.begin(); iter != valueMap.end(); iter++) { const std::string& absolutePath = rootLayer->ComputeAbsolutePath(iter->first); if (normalizeUrl(absolutePath) == normalizeUrl(layerIdentifier)) { valueMap.erase(iter); break; } } valueMap[relativePath] = PXR_NS::VtValue(value); rootLayerCustomData.SetValueAtPath(key, PXR_NS::VtValue(valueMap)); rootLayer->SetCustomLayerData(rootLayerCustomData); return true; } static void iterateSublayerTreeDFSInternal(PXR_NS::SdfLayerRefPtr hostLayer, PXR_NS::SdfLayerRefPtr currentlayer, const std::string& currentLayeridentifier, size_t sublayerIndex, const LayerGroupStartCallback& groupStartCallback, const LayerCallback& callback, const LayerGroupEndCallback& groupEndCallback, std::unordered_set<std::pair<std::string, 
std::string>, PairHash>& accessMap) { std::string hostLayerIdentifier; if (hostLayer) { hostLayerIdentifier = hostLayer->GetIdentifier(); } auto iter = accessMap.find({ hostLayerIdentifier, currentLayeridentifier }); if (iter != accessMap.end()) // This layer has been accessed already { return; } else { accessMap.insert({ hostLayerIdentifier, currentLayeridentifier }); } if (callback) { callback(hostLayer, currentlayer, currentLayeridentifier, sublayerIndex); } bool stop = false; if (groupStartCallback && !groupStartCallback(currentlayer, currentLayeridentifier)) { stop = true; } if (!stop && currentlayer) { const auto& sublayerPaths = currentlayer->GetSubLayerPaths(); for (size_t i = 0; i < sublayerPaths.size(); i++) { auto sublayer = getSublayer(currentlayer, i); std::string layerIdentifier; if (sublayer) { layerIdentifier = sublayer->GetIdentifier(); } else { getSublayerIdentifier(currentlayer, i, layerIdentifier); } iterateSublayerTreeDFSInternal(currentlayer, sublayer, layerIdentifier, i, groupStartCallback, callback, groupEndCallback, accessMap); } } if (groupEndCallback) { groupEndCallback(currentlayer, currentLayeridentifier); } } static void iteratePrimTreeDFSInternal(PXR_NS::SdfPrimSpecHandle parentPrim, PXR_NS::SdfPrimSpecHandle currentPrim, size_t primIndex, const PrimSpecGroupStartCallback& groupStartCallback, const PrimSpecCallback& callback, const PrimSpecGroupEndCallback& groupEndCallback) { if (!currentPrim || currentPrim->IsDormant()) { return; } if (callback) { callback(parentPrim, currentPrim, primIndex); } // Early stop bool stop = false; if (groupStartCallback) { stop = !groupStartCallback(currentPrim); } if (!stop) { auto childrenPrims = currentPrim->GetNameChildren(); for (size_t i = 0; i < childrenPrims.size(); i++) { iteratePrimTreeDFSInternal( currentPrim, childrenPrims[i], i, groupStartCallback, callback, groupEndCallback); } } if (groupEndCallback) { groupEndCallback(currentPrim); } return; } }; } }
omniverse-code/kit/include/omni/usd/ViewportTypes.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/usd/UsdTypes.h> #include <memory> namespace rtx { namespace resourcemanager { class RpResource; } } namespace omni { namespace usd { typedef size_t ViewPickingId; typedef int32_t ViewportHandle; static constexpr int32_t kInvalidViewportHandle = -1; using OnPickingCompleteFn = std::function<void(const char* path, const carb::Double3* worldPos)>; struct Picking { int left; int top; int right; int bottom; enum class Mode { eNone, eResetAndSelect, eMergeSelection, eInvertSelection, eQuery, /// request that do not change selection eTrack, /// track mouse position, picking request is allowed. eTrackBlocking, /// track mouse position, picking request is blocked. } mode; ViewPickingId pickingId; bool queryAddsOutline; // Used only when mode == eQuery OnPickingCompleteFn onCompleteFn; }; struct RenderVar { const char* name; union { void* rawResource; rtx::resourcemanager::RpResource* rpResource; }; bool isRpResource; }; } }
omniverse-code/kit/include/omni/usd/AssetUtils.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#ifndef USD_UTILS_INCLUDES
#    error "Please include UtilsIncludes.h before including this header or in pre-compiled header."
#endif

#include <omniAudioSchema/sound.h>

#include <omni/usd/UsdUtils.h>

#include <condition_variable>

namespace omni
{
namespace usd
{

class AssetUtils
{
public:
    /**
     * Imports an external file in to the stage.
     *
     * @param stage The stage to import file into.
     * @param importUrl The URL of the imported file.
     * @param primPath The path to create the imported prim at.
     * @param dataSourcePath The path of the file in its dataSource (Only needed for MDL).
     * @param dataSource DataSource associated with this file (Only needed for MDL).
     * @param connection Connection associated with this file (Only needed for MDL).
     * @return The created prim; invalid prim if the file type is unsupported or creation failed.
     */
    static pxr::UsdPrim createPrimFromAssetPath(pxr::UsdStageWeakPtr stage,
                                                const char* importUrl,
                                                const char* primPath,
                                                const char* dataSourcePath = "",
                                                carb::datasource::IDataSource* dataSource = nullptr,
                                                carb::datasource::Connection* connection = nullptr)
    {
        pxr::UsdPrim prim;
        // Avoid clobbering an existing prim by picking the next free path variant.
        std::string newPrimPath = omni::usd::UsdUtils::findNextNoneExisitingNodePath(stage, primPath, true);

        // MDL file
        static const std::regex kMdlFile("^.*\\.mdl(?:\\?.*)?$", std::regex_constants::icase | std::regex_constants::optimize);
        // the supported audio formats
        static const std::regex kAudioFile(
            "^.*\\.(?:wav|wave|ogg|oga|flac|fla|mp3|m4a|spx|opus)(?:\\?.*)?$",
            std::regex_constants::icase | std::regex_constants::optimize);

        // Author asset references relative to the current edit-target layer.
        std::string relativeUrl = importUrl;
        omni::usd::UsdUtils::makePathRelativeToLayer(stage->GetEditTarget().GetLayer(), relativeUrl);

        if (std::regex_search(importUrl, kMdlFile))
        {
            CARB_ASSERT(dataSource && connection);
            prim = createMdlMaterial(
                stage, pxr::SdfPath(newPrimPath), relativeUrl.c_str(), dataSourcePath, "", dataSource, connection);
        }
        else if (std::regex_search(importUrl, kAudioFile))
        {
            auto sound = pxr::OmniAudioSchemaOmniSound::Define(stage, pxr::SdfPath(newPrimPath));
            if (sound)
            {
                sound.CreateFilePathAttr(pxr::VtValue(pxr::SdfAssetPath(relativeUrl.c_str())));
                prim = sound.GetPrim();
            }
            else
                CARB_LOG_ERROR("failed to define an OmniAudioSchemaOmniSound");
        }

        return prim;
    }

    /**
     * Creates a UsdShadeMaterial + Shader pair bound to an MDL source asset.
     *
     * @param stage Stage to author into.
     * @param primPath Path for the Material prim (a child "Shader" prim is created under it).
     * @param mdlPath Path to the MDL file (authored as the shader's source asset).
     * @param mdlDataSourcePath Path of the MDL file within dataSource (used to read its content).
     * @param mdlMaterialName Optional explicit material name; see resolution order below.
     * @param dataSource Optional datasource used to read the MDL content for name extraction.
     * @param connection Optional datasource connection.
     * @return The Material prim; invalid prim on failure.
     */
    static pxr::UsdPrim createMdlMaterial(pxr::UsdStageWeakPtr stage,
                                          const pxr::SdfPath& primPath,
                                          const char* mdlPath,
                                          const char* mdlDataSourcePath,
                                          const char* mdlMaterialName = "",
                                          carb::datasource::IDataSource* dataSource = nullptr,
                                          carb::datasource::Connection* connection = nullptr)
    {
        // How do we get the material name:
        // - If user provides one via mdlMaterialName, use it.
        // - If user doesn't provide mdlMaterialName, but provides dataSource and connection, we do a regex search on
        //   the MDL file's content and extract the first material name.
        // - If user provides nothing, or previous step fails, we take the MDL filename as the material name.
        std::string materialName = (mdlMaterialName == nullptr) ? "" : mdlMaterialName;
        if (materialName.empty() && dataSource && connection)
        {
            materialName = findMaterialNameFromMdlContent(dataSource, connection, mdlPath, mdlDataSourcePath);
        }
        if (materialName.empty())
        {
            carb::extras::Path mdlCarbPath(mdlPath);
            materialName = mdlCarbPath.getStem();
        }
        if (materialName.empty())
        {
            CARB_LOG_ERROR("Failed to find material name for MDL file %s", mdlPath);
        }

        auto materialPrim = pxr::UsdShadeMaterial::Define(stage, primPath);
        if (materialPrim)
        {
            pxr::SdfPath shaderPrimPath = primPath.AppendPath(pxr::SdfPath("Shader"));
            auto shaderPrim = pxr::UsdShadeShader::Define(stage, shaderPrimPath);
            if (shaderPrim)
            {
                carb::settings::ISettings* settings = carb::getFramework()->acquireInterface<carb::settings::ISettings>();
                bool authorOldMdlSchema = settings->getAsBool(omni::usd::kAuthorOldMdlSchemaSettingPath);
                if (authorOldMdlSchema)
                {
                    // Legacy schema: id-based shader with "module"/"name" attributes.
                    materialPrim.CreateSurfaceOutput().ConnectToSource(
                        shaderPrim.CreateOutput(pxr::TfToken("out"), pxr::SdfValueTypeNames->Token));
                    shaderPrim.CreateIdAttr(pxr::VtValue(pxr::TfToken("mdlMaterial")));
                    shaderPrim.GetPrim()
                        .CreateAttribute(pxr::TfToken("module"), pxr::SdfValueTypeNames->Asset)
                        .Set(pxr::SdfAssetPath(mdlPath));
                    shaderPrim.GetPrim().CreateAttribute(pxr::TfToken("name"), pxr::SdfValueTypeNames->String).Set(materialName);
                }
                else
                {
                    // Modern schema: mdl-renderContext outputs with a source asset + sub-identifier.
                    pxr::TfToken mdlToken("mdl");
                    auto shaderOut = shaderPrim.CreateOutput(pxr::TfToken("out"), pxr::SdfValueTypeNames->Token);
                    materialPrim.CreateSurfaceOutput(mdlToken).ConnectToSource(shaderOut);
                    materialPrim.CreateVolumeOutput(mdlToken).ConnectToSource(shaderOut);
                    materialPrim.CreateDisplacementOutput(mdlToken).ConnectToSource(shaderOut);
                    shaderPrim.GetImplementationSourceAttr().Set(pxr::UsdShadeTokens->sourceAsset);
                    shaderPrim.SetSourceAsset(pxr::SdfAssetPath(mdlPath), mdlToken);
                    shaderPrim.SetSourceAssetSubIdentifier(pxr::TfToken(materialName), mdlToken);
                }
            }
        }

        return materialPrim.GetPrim();
    }

    /**
     * Reads the MDL file content through the datasource (blocking until the async
     * read completes) and extracts the first exported material name via regex.
     * Returns an empty string if the read or the extraction fails.
     */
    static std::string findMaterialNameFromMdlContent(carb::datasource::IDataSource* dataSource,
                                                      carb::datasource::Connection* connection,
                                                      const char* mdlPath,
                                                      const char* mdlDataSourcePath)
    {
        std::string materialName;
        // Shared state between this thread and the datasource read callback.
        struct ReadArgs
        {
            std::string mdlContent;
            std::atomic<bool> readDone{ false };
            std::condition_variable readDoneCondition;
            std::mutex readDoneMutex;
        } readArgs;
        dataSource->readData(connection, mdlDataSourcePath, std::malloc,
                             [](carb::datasource::Response response, const char* path, uint8_t* payload,
                                size_t payloadSize, void* userData) {
                                 ReadArgs* readArgs = reinterpret_cast<ReadArgs*>(userData);
                                 if (response == carb::datasource::Response::eOk)
                                 {
                                     readArgs->mdlContent = std::string(payload, payload + payloadSize);
                                 }
                                 // Payload was allocated with the std::malloc passed above.
                                 std::free(payload);
                                 // Signal completion under the lock so the waiter can't miss it.
                                 std::unique_lock<std::mutex> lock(readArgs->readDoneMutex);
                                 readArgs->readDone.store(true, std::memory_order_relaxed);
                                 readArgs->readDoneCondition.notify_all();
                             },
                             &readArgs);

        {
            std::unique_lock<std::mutex> lock(readArgs.readDoneMutex);
            readArgs.readDoneCondition.wait(
                lock, [&readArgs] { return readArgs.readDone.load(std::memory_order_relaxed); });
        }

        if (readArgs.mdlContent.length() == 0)
        {
            CARB_LOG_ERROR("Failed to load %s when creating MDL material from it", mdlPath);
            return "";
        }

        // Do a regex search on the content of MDL file to get first valid material name to use
        const static std::regex kExportedMaterialRegex(
            "export\\s+material\\s+([^\\s]+)\\s*\\(", std::regex_constants::optimize);
        std::smatch match;
        if (std::regex_search(readArgs.mdlContent, match, kExportedMaterialRegex))
        {
            if (match.size() >= 2)
            {
                materialName = match[1];
            }
            else
            {
                CARB_LOG_WARN("Could not extract material name from %s. Fallback to filename.", mdlPath);
            }
        }

        return materialName;
    }
};
}
}
omniverse-code/kit/include/omni/usd/UtilsIncludes.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // Define this macro so that other headers that are supposed to have this header included before them can check against // it. #define USD_UTILS_INCLUDES // Include cstdio here so that vsnprintf is properly declared. This is necessary because pyerrors.h has // #define vsnprintf _vsnprintf which later causes <cstdio> to declare std::_vsnprintf instead of the correct and proper // std::vsnprintf. By doing it here before everything else, we avoid this nonsense. #include <cstdio> // Python must be included first because it monkeys with macros that cause // TBB to fail to compile in debug mode if TBB is included before Python #include <boost/python/object.hpp> #include <pxr/base/gf/rotation.h> #include <pxr/base/tf/fileUtils.h> #include <pxr/usd/kind/registry.h> #include <pxr/usd/pcp/layerStack.h> #include <pxr/usd/sdf/attributeSpec.h> #include <pxr/usd/sdf/copyUtils.h> #include <pxr/usd/sdf/childrenUtils.h> #include <pxr/usd/sdf/fileFormat.h> #include <pxr/usd/sdf/relationshipSpec.h> #include <pxr/usd/usd/editContext.h> #include <pxr/usd/usd/modelAPI.h> #include <pxr/usd/usd/stage.h> #include <pxr/usd/usd/stageCache.h> #include <pxr/usd/usdGeom/bboxCache.h> #include <pxr/usd/usdGeom/metrics.h> #include <pxr/usd/usdGeom/xform.h> #include <pxr/usd/usdShade/material.h> #include <pxr/usd/usdUtils/stageCache.h> #include <pxr/usd/usdUtils/stitch.h> #include <pxr/usd/usdUtils/dependencies.h> #include <pxr/usd/usd/primCompositionQuery.h>