file_path
stringlengths 32
153
| content
stringlengths 0
3.14M
|
---|---|
omniverse-code/kit/include/omni/graph/exec/unstable/PassPipeline.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file PassPipeline.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::PassPipeline.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGlobalPass.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/IPartitionPass.h>
#include <omni/graph/exec/unstable/IPassPipeline.h>
#include <omni/graph/exec/unstable/IPassRegistry.h>
#include <omni/graph/exec/unstable/IPassTypeRegistry.h>
#include <omni/graph/exec/unstable/IPopulatePass.h>
#include <omni/graph/exec/unstable/Traversal.h>
#include <omni/graph/exec/unstable/Types.h>
#include <memory>
#include <unordered_map>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IPassPipeline
template <typename GraphBuilderT, typename... Bases>
class PassPipelineT : public Implements<Bases...>
{
public:
    //! Creates and returns a @ref IPassPipeline.
    //!
    //! May throw.
    static omni::core::ObjectPtr<PassPipelineT> create()
    {
        return omni::core::steal(new PassPipelineT);
    }

protected:
    //! Core implementation of @ref omni::graph::exec::unstable::IPassPipeline::needsConstruction_abi
    //!
    //! Construction is needed whenever the global pass registry changed since this
    //! pipeline's acceleration structures were last (re)built.
    bool needsConstruction_abi() noexcept override
    {
        return !m_registryCache.inSync(getPassRegistry()->getStamp());
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IPassPipeline::construct_abi
    //!
    //! Rebuilds the acceleration structures from the pass registry: a name-hash lookup
    //! table for populate passes and a priority-ordered list of partition passes.
    omni::core::Result construct_abi() noexcept override
    {
        try
        {
            auto registry = getPassRegistry();
            if (m_registryCache.makeSync(registry->getStamp()))
            {
                // Populate passes are matched by name hash; rebuild the lookup table.
                m_populatePasses.clear();
                auto populatePasses = registry->getPassTypeRegistry(PassType::ePopulate);
                OMNI_GRAPH_EXEC_ASSERT(populatePasses);
                if (populatePasses)
                {
                    for (auto& record : populatePasses->getPasses())
                    {
                        m_populatePasses.emplace(record.nameToMatch->getHash(), record); // may throw
                    }
                }
                // Partition passes are kept sorted from highest priority to lowest so they
                // can simply be iterated in order at execution time.
                m_partitionPasses.clear();
                auto partitionPasses = registry->getPassTypeRegistry(PassType::ePartitioning);
                OMNI_GRAPH_EXEC_ASSERT(partitionPasses);
                if (partitionPasses)
                {
                    for (auto& record : partitionPasses->getPasses())
                    {
                        _insert_sorted(m_partitionPasses, record,
                                       [](const PassTypeRegistryEntry& a, const PassTypeRegistryEntry& b) -> bool
                                       { return a.priority > b.priority; }); // may throw
                    }
                }
            }
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IPassPipeline::needsExecute_abi
    //!
    //! Execution is needed when this pipeline has not yet run for the given global
    //! topology version.
    bool needsExecute_abi(Stamp globalTopology) noexcept override
    {
        return !m_globalTopology.inSync(globalTopology);
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IPassPipeline::execute_abi
    //!
    //! Acceleration structure is generated on changes to @ref omni::graph::exec::unstable::IPassRegistry
    //! to organize passes for this pipeline.
    //!
    //! @note Passes are executed single-threaded in the core implementation because we have no access to a
    //!       rich threading library in OV. We have a parallel version available in omni.kit.exec.core.
    omni::core::Result execute_abi(IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef) noexcept override
    {
        try
        {
            auto globalTopologyStamp = builderContext->getGraph()->getGlobalTopologyStamp();
            if (m_globalTopology.inSync(*globalTopologyStamp))
            {
                return omni::core::kResultSuccess; // already in sync, nothing to do
            }
            if (this->needsConstruction())
            {
                this->construct(); // may throw
            }
            // Populate passes run first (recursively triggering partition passes per
            // NodeGraphDef); global passes run once at the end.
            _runPopulatePass(builderContext, nodeGraphDef);
            _runGlobalPass(builderContext, nodeGraphDef);
            m_globalTopology.sync(*globalTopologyStamp);
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
    }

    //! Execute populate passes
    //!
    //! The algorithm will traverse the graph in DFS order serially. For each visited node,
    //! it will give a chance to registered population passes to populate the definition
    //! of a discovered @ref omni::graph::exec::unstable::INode or @ref omni::graph::exec::unstable::INodeGraphDef.
    //!
    //! To preserve instancing within a single NodeGraphDef, the algorithm will keep track of discovered
    //! NodeGraphDefs and populate/continue traversal of only the first visited node that instantiates it.
    //! Later, when the internal state goes out of scope, all instances are updated to point to the same definition.
    //!
    //! This algorithm is written in a way that follows the multithreaded version from omni.kit.exec.core and
    //! the goal is to replace it with the multithreaded version once OM-70769 is closed.
    void _runPopulatePass(IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef)
    {
        //! Per-NodeGraphDef traversal state. Collects duplicate instances of shared
        //! NodeGraphDefs while the first instance is populated; on destruction it
        //! re-points the collected instances at the (possibly new) definition and
        //! runs the partition passes if this NodeGraphDef changed.
        class PassState
        {
            //! Bookkeeping for one shared NodeGraphDef: the first node that instanced it
            //! plus every later instance discovered during traversal.
            struct Record
            {
                INode* orgNode{ nullptr };
                // This definition might have been released if it is only single instance
                // and it was changed during graph transformation. That's ok, since we will
                // only use the address of the pointer, and only if other instances exist
                // (if so, they will own a reference to this pointer making it still valid)
                INodeGraphDef* orgNodeGraphDef{ nullptr };
                // definitions are referenced by nodes in the graph, but can as well be held
                // by the authoring side for fast access. We will allocate a small space
                // for these cases to avoid dynamic allocation cost when reserving space upfront for all instances
                // based on use count.
                SmallVector<INode*, 2> accumulatedInstances;
                static_assert(
                    sizeof(accumulatedInstances) == 24,
                    "Expecting sizeof(SmallVector<INode*, 2>) to be 24 bytes and match sizeof an empty std::vector");

                Record(INode* instance, INodeGraphDef* nodeGraphDef) : orgNode(instance), orgNodeGraphDef(nodeGraphDef)
                {
                    accumulatedInstances.reserve(useCount(nodeGraphDef));
                }

                //! Remember one more node instancing the same original definition.
                void addInstance(INode* instance)
                {
                    accumulatedInstances.emplace_back(instance);
                }

                //! Point every accumulated instance at whatever definition the first
                //! visited node ended up with after population.
                void processInstances()
                {
                    if (accumulatedInstances.size() == 0)
                        return;
                    if (auto newNodeDef = orgNode->getNodeDef())
                    {
                        for (auto node : accumulatedInstances)
                        {
                            exec::unstable::cast<IGraphBuilderNode>(node)->_setNodeDef(newNodeDef);
                        }
                    }
                    else
                    {
                        auto newNodeGraphDef = orgNode->getNodeGraphDef();
                        // Only re-point instances when the definition actually changed.
                        if (newNodeGraphDef != orgNodeGraphDef)
                        {
                            for (auto node : accumulatedInstances)
                            {
                                exec::unstable::cast<IGraphBuilderNode>(node)->_setNodeGraphDef(newNodeGraphDef);
                            }
                        }
                    }
                }
            };

        public:
            PassState(PassPipelineT* owner, IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef)
                : m_passPipeline(owner)
            {
                // NOTE(review): uses the concrete GraphBuilder rather than the GraphBuilderT
                // template parameter (which _runPartitionPass uses) — presumably intentional
                // for the core pipeline; confirm for non-default instantiations.
                m_builder = GraphBuilder::createForPass(builderContext, nodeGraphDef);
            }

            ~PassState()
            {
                // Update instances
                for (auto& pair : m_registry)
                {
                    pair.second.processInstances();
                }
                // If this NodeGraphDef changed in this population pass, run the partitioning pass on it.
                // The topology's construction stamp being in sync with the builder context's stamp
                // indicates the topology was altered during the current construction pass.
                auto* nodeGraphDef = m_builder->getNodeGraphDef();
                auto* builderContext = m_builder->getContext();
                auto constructionStamp = nodeGraphDef->getTopology()->getConstructionStamp();
                if (constructionStamp.inSync(builderContext->getConstructionStamp()))
                    m_passPipeline->_runPartitionPass(builderContext, nodeGraphDef);
            }

            //! Builder scoped to the NodeGraphDef this state was created for.
            IGraphBuilder* getBuilder()
            {
                return m_builder.get();
            }

            //! Store record of a node graph if it has multiple instances referencing it.
            //!
            //! Returns true if a new record was generated for this NodeGraphDef.
            //! It also returns true when nodeGraphDef has only a single instance referencing it. For performance
            //! reasons we don't store such a record in the container, which reduces the cost in processInstances.
            bool addRecord(INode* instance, INodeGraphDef* nodeGraphDef)
            {
                auto fountIt = m_registry.find(nodeGraphDef);
                if (fountIt == m_registry.end())
                {
                    m_registry.emplace(std::piecewise_construct, std::forward_as_tuple(nodeGraphDef),
                                       std::forward_as_tuple(instance, nodeGraphDef));
                    return true;
                }
                else
                {
                    fountIt->second.addInstance(instance);
                    return false;
                }
            }

        private:
            PassPipelineT* m_passPipeline; //!< Owning pipeline; used to launch partition passes from the destructor
            GraphBuilderPtr m_builder; //!< Builder for the NodeGraphDef being populated
            using Container = std::unordered_map<INodeGraphDef*, Record>;
            Container m_registry; //!< Shared NodeGraphDefs discovered during traversal
        };

        // Recursive DFS over the graph-of-graphs; `fn` is the lambda itself, passed in so
        // it can recurse into nested NodeGraphDefs.
        auto traversalFnImp = [this, builderContext](INodeGraphDef* nodeGraphDef, auto& fn) -> void
        {
            PassState nodeGraphDefPassState(this, builderContext, nodeGraphDef);
            traversal_dfs<VisitFirst>(
                nodeGraphDef->getTopology()->getRoot(),
                [this, &nodeGraphDefPassState, &fn](auto info, INode* prev, INode* curr)
                {
                    bool processNodeGraphDef = true;
                    auto builder = nodeGraphDefPassState.getBuilder();
                    // First try to match a populate pass against the node's own name...
                    auto foundIt = this->m_populatePasses.find(curr->getName().getHash());
                    if (foundIt != this->m_populatePasses.end())
                    {
                        auto newPass = foundIt->second.factory->createPass(builder);
                        auto newPopulatePass = exec::unstable::cast<IPopulatePass>(newPass);
                        newPopulatePass->run(builder, curr);
                    }
                    else if (auto currNodeGraphDef = curr->getNodeGraphDef())
                    {
                        // ...otherwise match against the name of the NodeGraphDef it instantiates.
                        auto foundIt = this->m_populatePasses.find(currNodeGraphDef->getName().getHash());
                        if (foundIt != this->m_populatePasses.end())
                        {
                            // Only the first node instancing a shared NodeGraphDef is populated;
                            // later instances are recorded and re-pointed when PassState dies.
                            processNodeGraphDef = nodeGraphDefPassState.addRecord(curr, currNodeGraphDef);
                            if (processNodeGraphDef)
                            {
                                auto newPass = foundIt->second.factory->createPass(builder);
                                auto newPopulatePass = exec::unstable::cast<IPopulatePass>(newPass);
                                newPopulatePass->run(builder, curr);
                            }
                        }
                    }
                    if (processNodeGraphDef)
                    {
                        // re-acquire the graph node def (because there might have been a pass
                        // that expanded it) and traverse inside
                        if (auto nodeGraphDef = curr->getNodeGraphDef())
                        {
                            fn(nodeGraphDef, fn);
                        }
                    }
                    info.continueVisit(curr);
                });
            // nodeGraphDefPassState is going out of scope and will trigger additional work.
            // The code is structured this way to make it easier for multithreaded execution
            // where the partitioning pass should execute when the last reference of the state is removed.
        };
        traversalFnImp(nodeGraphDef, traversalFnImp);
    }

    //! Execute partition passes
    //!
    //! Instantiates each registered partition pass (already ordered highest priority first),
    //! lets the initialized ones inspect every node of the given NodeGraphDef, then lets
    //! each one commit its changes through the builder.
    void _runPartitionPass(IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef)
    {
        // Partition passes require NodeGraphDefs to provide a node factory. Skip ones that don't
        // implement it.
        if (!nodeGraphDef->getNodeFactory())
        {
            return;
        }
        auto builder{ GraphBuilderT::createForPass(builderContext, nodeGraphDef) };
        INode* root = nodeGraphDef->getTopology()->getRoot();
        std::vector<PartitionPassPtr> passInstances;
        passInstances.reserve(m_partitionPasses.size());
        // Initialize partitioning passes; only passes that accept this topology take part.
        for (auto& record : m_partitionPasses)
        {
            auto newPass = record.factory->createPass(builder);
            auto newPartitionPass = exec::unstable::cast<IPartitionPass>(newPass);
            if (newPartitionPass->initialize(nodeGraphDef->getTopology()))
            {
                passInstances.emplace_back(newPartitionPass, omni::core::kBorrow);
            }
        }
        // No need to do the traversal if nothing initialized for this node graph def
        if (passInstances.size() == 0)
            return;
        // Select nodes for partitioning
        traversal_dfs<VisitFirst>(root,
                                  [this, &passInstances](auto info, INode* prev, INode* curr)
                                  {
                                      for (auto& pass : passInstances)
                                      {
                                          pass->run(curr);
                                      }
                                      info.continueVisit(curr);
                                  });
        // commit changes to the definition
        for (auto& pass : passInstances)
        {
            pass->commit(builder);
        }
    }

    //! Execute global passes
    //!
    //! Each registered global pass is instantiated and run once over the top-level NodeGraphDef.
    void _runGlobalPass(IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef)
    {
        auto builder{ GraphBuilderT::createForPass(builderContext, nodeGraphDef) };
        auto registry = getPassRegistry();
        auto globalPasses = registry->getPassTypeRegistry(PassType::eGlobal);
        for (auto& record : globalPasses->getPasses())
        {
            auto newPass = record.factory->createPass(builder); // may throw
            auto newGlobalPass = exec::unstable::cast<IGlobalPass>(newPass);
            newGlobalPass->run(builder);
        }
    }

    //! Helper function to insert an item into a vector in sorted order.
    //!
    //! @param vec  Destination vector; assumed to already be sorted w.r.t. @p comp.
    //! @param item Item to insert.
    //! @param comp Comparator defining the ordering.
    //! @return Iterator to the newly inserted element.
    template <typename T, typename Compare>
    typename std::vector<T>::iterator _insert_sorted(std::vector<T>& vec, T const& item, Compare comp)
    {
        return vec.insert(std::upper_bound(vec.begin(), vec.end(), item, comp), item);
    }

    //! Type of acceleration structure holding population passes
    using PopulatePassCache = std::unordered_map<NameHash, PassTypeRegistryEntry>;

    SyncStamp m_registryCache; //!< Synchronized with the pass registry version; allows us to detect changes
    SyncStamp m_globalTopology; //!< Synchronized with the execution graph; allows us to detect changes to execute the
                                //!< pipeline
    PopulatePassCache m_populatePasses; //!< Acceleration structure for this pipeline to speed up the searches
    std::vector<PassTypeRegistryEntry> m_partitionPasses; //!< Acceleration structure to keep passes ordered for this
                                                          //!< pipeline. We order from the highest priority to the
                                                          //!< lowest.
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#include <omni/graph/exec/unstable/GraphBuilder.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Core PassPipeline implementation for @ref omni::graph::exec::unstable::IPassPipeline
using PassPipeline = PassPipelineT<GraphBuilder, IPassPipeline>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/IScheduleFunction.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IScheduleFunction.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IScheduleFunction.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IScheduleFunction_abi;
class IScheduleFunction;
template <typename T>
class ScheduleFunction;
//! Interface wrapping a function (possibly with storage). Used to wrap a task when passing generated work to the
//! scheduler.
class IScheduleFunction_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
                                                          OMNI_TYPE_ID("omni.graph.exec.unstable.IScheduleFunction")>
{
protected:
    //! Main execute method. Returns the status of the execution.
    //!
    //! NOTE(review): thread-safety and reentrancy expectations are not stated here; they
    //! are defined by the concrete implementation — confirm before calling concurrently.
    virtual Status invoke_abi() noexcept = 0;
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IScheduleFunction.gen.h>
//! @copydoc omni::graph::exec::unstable::IScheduleFunction_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IScheduleFunction
    : public omni::core::Generated<omni::graph::exec::unstable::IScheduleFunction_abi>
{
    // Intentionally empty: the full API comes from the generated wrapper of the ABI
    // interface declared above.
};
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IScheduleFunction.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDef.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Graph definition. Defines work to be done as a graph.
//!
//! Nodes within a graph represent work to be done. The actual work to be performed is described in a
//! @rstref{definition <ef_definition>}. Each node wanting to perform work points to a definition.
//!
//! This interface is a subclass of the work definition interface (i.e. @ref omni::graph::exec::unstable::IDef) and
//! extends @ref omni::graph::exec::unstable::IDef with methods to describe work as a graph.
//!
//! Visually:
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//!    :align: center
//!
//! @endrst
//!
//! Above, you can see the two types of definitions: opaque definitions (described by @ref
//! omni::graph::exec::unstable::INodeDef) and graph definitions (described by this interface).
//!
//! Nodes within a graph definition can point to other graph definitions. This composability is where EF gets its
//! *graph of graphs* moniker.
//!
//! Multiple nodes in the execution graph can point to the same instance of a graph definition. This saves both space
//! and graph construction time. However, since each graph definition can be shared, its pointer value cannot be used
//! to uniquely identify its location in the graph. To solve this, when traversing/executing a graph definition, an
//! @ref omni::graph::exec::unstable::ExecutionPath is passed (usually via @ref
//! omni::graph::exec::unstable::ExecutionTask::getUpstreamPath()).
//!
//! When defining new graph types, it is common to create a new implementation of this interface. See @ref
//! omni::graph::exec::unstable::NodeGraphDef for an implementation of this interface that can be easily inherited from.
//! See @rstref{Definition Creation <ef_definition_creation>} for a guide on creating your own graph definition.
//!
//! How a graph definition's nodes are traversed during execution is defined by the definition's @ref
//! omni::graph::exec::unstable::IExecutor. See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth
//! guide on how executors and graph definitions work together during execution.
//!
//! See also @ref omni::graph::exec::unstable::INode, @ref omni::graph::exec::unstable::IExecutor, and @ref
//! omni::graph::exec::unstable::ExecutionTask.
template <>
class omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>
    : public omni::graph::exec::unstable::INodeGraphDef_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeGraphDef")

    //! Return this graph's topology object.
    //!
    //! Each @ref omni::graph::exec::unstable::INodeGraphDef owns a @ref omni::graph::exec::unstable::ITopology.
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    omni::graph::exec::unstable::ITopology* getTopology() noexcept;

    //! Initialize the state of the graph.
    //!
    //! It is up to the implementation of the graph type to decide whether this call needs to be propagated over all
    //! nodes within the graph or a single shared state is owned by the graph.
    //!
    //! @param rootTask State will be initialized for every instance of this graph. Root task will provide a path to
    //!                 allow discovery of the state. Must not be @c nullptr.
    void initializeState(omni::graph::exec::unstable::ExecutionTask& rootTask);

    //! Pre-execution call can be used to setup the graph state prior to execution or skip entirely the execution.
    //!
    //! The given task must not be @c nullptr.
    omni::graph::exec::unstable::Status preExecute(omni::graph::exec::unstable::ExecutionTask& info) noexcept;

    //! Post-execution call can be used to finalize the execution, e.g. transfer computation results to consumers.
    //!
    //! The given task must not be @c nullptr.
    omni::graph::exec::unstable::Status postExecute(omni::graph::exec::unstable::ExecutionTask& info) noexcept;

    //! Acquire factory object allowing for allocating new node instances for this node graph def.
    //!
    //! Provided factory may be empty when the graph def doesn't allow allocating new nodes outside of the pass that
    //! constructed the definition in the first place.
    //!
    //! Accessing the node factory is thread-safe but mutating the graph's topology is not. This includes node creation.
    omni::core::ObjectPtr<omni::graph::exec::unstable::INodeFactory> getNodeFactory() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// Inline trampolines: each wrapper forwards to the corresponding _abi method; where the
// ABI reports failure via omni::core::Result, the wrapper converts it into an exception.

// Forwards to getTopology_abi(); no acquire() on the returned pointer.
inline omni::graph::exec::unstable::ITopology* omni::core::Generated<
    omni::graph::exec::unstable::INodeGraphDef_abi>::getTopology() noexcept
{
    return getTopology_abi();
}

// Forwards to initializeState_abi(); throws on a failing Result.
inline void omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>::initializeState(
    omni::graph::exec::unstable::ExecutionTask& rootTask)
{
    OMNI_THROW_IF_FAILED(initializeState_abi(&rootTask));
}

// Forwards to preExecute_abi(), returning its Status directly.
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>::preExecute(
    omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
    return preExecute_abi(&info);
}

// Forwards to postExecute_abi(), returning its Status directly.
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>::postExecute(
    omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
    return postExecute_abi(&info);
}

// Forwards to getNodeFactory_abi(); steal() takes over the reference returned by the ABI.
inline omni::core::ObjectPtr<omni::graph::exec::unstable::INodeFactory> omni::core::Generated<
    omni::graph::exec::unstable::INodeGraphDef_abi>::getNodeFactory() noexcept
{
    return omni::core::steal(getNodeFactory_abi());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResult.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IBackgroundResult.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IBackgroundResult.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Status.h>
#include <omni/graph/exec/unstable/Types.h>
#include <chrono>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class ExecutionTask;
class IBackgroundResult;
class IBackgroundResult_abi;
//! Class representing a result of asynchronous computation.
//!
//! Create via @ref omni::graph::exec::unstable::IBackgroundTask::getBackgroundResult().
//!
//! Call @ref omni::graph::exec::unstable::IBackgroundResult::isReady() or @ref
//! omni::graph::exec::unstable::IBackgroundResult::waitFor() to make sure the result is ready. Once the result is
//! ready, call @ref omni::graph::exec::unstable::IBackgroundResult::write() to make the result visible.
//!
//! Operates much like `std::future`.
class IBackgroundResult_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
                                                          OMNI_TYPE_ID("omni.graph.exec.unstable.IBackgroundResult")>
{
protected:
    //! Check if background computation has a result available for consumption.
    //!
    //! @param ready Receives @c true when it is safe to call
    //!              omni::graph::exec::unstable::IBackgroundResult::write(), @c false otherwise.
    //!
    //! @return @c true when it is safe to call omni::graph::exec::unstable::IBackgroundResult::write(), @c false
    //! otherwise.
    //!
    //! Once @ref omni::graph::exec::unstable::IBackgroundResult::write() has been called, this method will return an
    //! error.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result
    isReady_abi(OMNI_ATTR("not_null, throw_if_null, out, *return") bool* ready) noexcept = 0;

    //! Request background processing cancellation
    //!
    //! @param blocking If @c true, this call won't exit until background processing is completed.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result cancel_abi(bool blocking) noexcept = 0;

    //! Write the result.
    //!
    //! @param info Task the result is written into/through.
    //! @param out  Receives the resulting execution status.
    //!
    //! This method is not thread safe.
    //!
    //! An error is returned if this method is called more than once.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result
    write_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info,
              OMNI_ATTR("out, not_null, throw_if_null, *return") Status* out) noexcept = 0;

    //! Waits for the specified time for the result to become ready.
    //!
    //! @param nanoseconds Maximum time to wait, in nanoseconds.
    //! @param out         Receives the wait outcome (ready vs. timeout).
    //!
    //! If the result becomes ready in the specified time (or is already ready) @ref
    //! omni::graph::exec::unstable::BackgroundResultStatus::eReady is returned. Otherwise, @ref
    //! omni::graph::exec::unstable::BackgroundResultStatus::eTimeout is returned.
    //!
    //! This method is not thread safe.
    //!
    //! Returns an error if the result has already been consumed.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result
    waitFor_abi(uint64_t nanoseconds,
                OMNI_ATTR("out, not_null, throw_if_null, *return") BackgroundResultStatus* out) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IBackgroundResult.
using BackgroundResultPtr = omni::core::ObjectPtr<IBackgroundResult>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IBackgroundResult.gen.h>
//! @copydoc omni::graph::exec::unstable::IBackgroundResult_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IBackgroundResult
    : public omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>
{
public:
    //! Waits the specified time for the result to become ready.
    //!
    //! Convenience overload accepting any `std::chrono` duration; the value is converted
    //! to nanoseconds and forwarded to the generated `waitFor(uint64_t)` overload.
    //!
    //! See @ref IBackgroundResult_abi::waitFor_abi().
    template <typename Rep, typename Period>
    BackgroundResultStatus waitFor(std::chrono::duration<Rep, Period> duration)
    {
        const auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
        return waitFor(static_cast<uint64_t>(ns.count()));
    }

    // Keep the generated nanosecond-count overload visible alongside the template.
    using omni::core::Generated<IBackgroundResult_abi>::waitFor;
};
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IBackgroundResult.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/Topology.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Topology.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::Topology.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/CompactUniqueIndex.h>
#include <omni/graph/exec/unstable/ITopology.h>
#include <omni/graph/exec/unstable/Node.h>
#include <memory>
#include <unordered_map>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::ITopology
class Topology : public Implements<ITopology>
{
public:
    //! Creates a new topology whose root node carries the given debug name.
    //!
    //! Throws if @p rootDebugName is @c nullptr; may also throw on allocation failure.
    static omni::core::ObjectPtr<Topology> create(const char* rootDebugName)
    {
        OMNI_THROW_IF_ARG_NULL(rootDebugName);
        return omni::core::steal(new Topology(rootDebugName));
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::getNodeCount_abi
    uint64_t getNodeCount_abi() noexcept override
    {
        return m_nodeIndexes.size();
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::getRoot_abi
    INode* getRoot_abi() noexcept override
    {
        return m_root.get();
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::getStamp_abi
    Stamp getStamp_abi() noexcept override
    {
        return m_version;
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::invalidate_abi
    //!
    //! Bumping the version stamp is what invalidates the topology; registered
    //! forwarders are then notified so dependents can react.
    void invalidate_abi() noexcept override
    {
        if (!isValid())
        {
            return; // nothing to do when the topology is not currently valid
        }
        m_version.next();
        _forwardInvalidation();
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::acquireNodeIndex_abi
    omni::core::Result acquireNodeIndex_abi(NodeIndexInTopology* out) noexcept override
    {
        try
        {
            const auto index = m_nodeIndexes.acquireUniqueIndex(); // may throw
            *out = index;
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::releaseNodeIndex_abi
    void releaseNodeIndex_abi(NodeIndexInTopology index) noexcept override
    {
        m_nodeIndexes.releaseUniqueIndex(index);
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::addInvalidationForwarder_abi
    virtual omni::core::Result addInvalidationForwarder_abi(InvalidationForwarderId owner,
                                                            IInvalidationForwarder* callback) noexcept override
    {
        try
        {
            m_invalidationForwarders.emplace( // may throw
                owner, omni::core::ObjectPtr<IInvalidationForwarder>{ callback, omni::core::kBorrow });
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::removeInvalidationForwarder_abi
    void removeInvalidationForwarder_abi(InvalidationForwarderId owner) noexcept override
    {
        // erase-by-key is a no-op when the owner was never registered
        m_invalidationForwarders.erase(owner);
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::getConstructionStamp_abi
    SyncStamp getConstructionStamp_abi() noexcept override
    {
        return m_constructionStamp;
    }

    //! @copydoc omni::graph::exec::unstable::ITopology::_setConstructionInSync_abi
    void _setConstructionInSync_abi(Stamp toSync) noexcept override
    {
        m_constructionStamp.sync(toSync);
    }

    //! Constructor. The root node is created immediately and owned by this topology.
    explicit Topology(const char* rootDebugName) : m_root{ Node::create(this, rootDebugName) }
    {
        // Hand out a globally unique, monotonically increasing version to every
        // topology ever constructed.
        static Stamp sNextTopologyVersion;
        sNextTopologyVersion.next();
        m_version = sNextTopologyVersion;
    }

    //! Destructor
    virtual ~Topology()
    {
        // Bump the version so the root node's ~Node() won't try to invalidate
        // this already-dying topology.
        m_version.next();
    }

private:
    //! Notify every registered forwarder that this topology was invalidated.
    void _forwardInvalidation()
    {
        for (auto& entry : m_invalidationForwarders)
        {
            entry.second->invoke(this);
        }
    }

    CompactUniqueIndex m_nodeIndexes; //!< Compact registry of unique node indexes
    omni::core::ObjectPtr<INode> m_root; //!< Root node allowing discovery of all nodes within the current topology
    Stamp m_version; //!< Topology version; nodes compare against it to detect whether they are current
    SyncStamp m_constructionStamp; //!< Synchronized with @ref omni::graph::exec::unstable::IGraphBuilderContext.
                                   //!< Allows detecting in which construction pass this topology was altered.
    //! Functions to call on topology invalidation
    std::unordered_map<InvalidationForwarderId, omni::core::ObjectPtr<IInvalidationForwarder>> m_invalidationForwarders;
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/IPass.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for graph transformation passes.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPass_abi> : public omni::graph::exec::unstable::IPass_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPass")

    // No additional API: IPass is the common base for graph transformation passes;
    // see @ref groupOmniGraphExecPasses for the concrete pass interfaces.
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/IBase.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IBase.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IBase.
#pragma once
#include <omni/core/IObject.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>
#include <omni/graph/exec/unstable/Types.h>
//! @defgroup groupOmniGraphExecInterfaces API Interfaces
//!
//! @brief Convenience interfaces backed by a stable ABI.
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IBase;
class IBase_abi;
class ExecutionTask;
//! Base class for all @ref omni::graph::exec objects.
//!
//! Defines an interface for casting between objects without calling @ref omni::core::IObject::acquire().
class IBase_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.exec.unstable.IBase")>
{
protected:
    //! Casts this object to the type described by the given id.
    //!
    //! Returns @c nullptr if the cast was not successful.
    //!
    //! Unlike @ref omni::core::IObject::cast(), this casting method does not call @ref omni::core::IObject::acquire().
    //!
    //! @thread_safety This method is thread safe.
    virtual void* castWithoutAcquire_abi(omni::core::TypeId id) noexcept = 0;
    //! Returns the number of different instances (this included) referencing the current object.
    //!
    //! The returned value is a snapshot and may be stale by the time the caller inspects it.
    //!
    //! @thread_safety This method is thread safe.
    virtual uint32_t getUseCount_abi() noexcept = 0;
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IBase.gen.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IBase_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
//!
//! Concrete API wrapper generated from the ABI declaration; adds no members of its own.
class IBase : public omni::core::Generated<omni::graph::exec::unstable::IBase_abi>
{
};
//! Casts the given raw pointer to the given interface (e.g. T).
//!
//! `nullptr` is accepted and yields `nullptr`.
//!
//! Unlike @ref omni::core::cast(), this function does not call @ref omni::core::IObject::acquire() on the returned
//! pointer.
//!
//! @returns A valid pointer if the given pointer implements the given interface; `nullptr` otherwise.
template <typename T, typename U>
inline T* cast(U* ptr) noexcept
{
    static_assert(std::is_base_of<IBase, T>::value, "cast can only be used with classes that derive from IBase");
    return ptr ? reinterpret_cast<T*>(ptr->castWithoutAcquire(T::kTypeId)) : nullptr;
}
//! Casts the pointer held by the given @ref omni::core::ObjectParam to the given interface (e.g. T).
//!
//! An empty parameter is accepted and yields `nullptr`.
//!
//! Unlike @ref omni::core::cast(), this function does not call @ref omni::core::IObject::acquire() on the returned
//! pointer.
//!
//! @returns A valid pointer if the held pointer implements the given interface; `nullptr` otherwise.
template <typename T, typename U>
inline T* cast(omni::core::ObjectParam<U> ptr) noexcept
{
    static_assert(std::is_base_of<IBase, T>::value, "cast can only be used with classes that derive from IBase");
    return ptr ? reinterpret_cast<T*>(ptr->castWithoutAcquire(T::kTypeId)) : nullptr;
}
//! Casts the pointer held by the given @ref omni::core::ObjectPtr to the given interface (e.g. T).
//!
//! An empty smart pointer is accepted and yields `nullptr`.
//!
//! Unlike @ref omni::core::cast(), this function does not call @ref omni::core::IObject::acquire() on the returned
//! pointer.
//!
//! @returns A valid pointer if the held pointer implements the given interface; `nullptr` otherwise.
template <typename T, typename U>
inline T* cast(omni::core::ObjectPtr<U> ptr) noexcept
{
    static_assert(std::is_base_of<IBase, T>::value, "cast can only be used with classes that derive from IBase");
    return ptr ? reinterpret_cast<T*>(ptr->castWithoutAcquire(T::kTypeId)) : nullptr;
}
#ifndef DOXYGEN_BUILD
namespace details
{
template <typename T>
inline void* castWithoutAcquire(T* obj, omni::core::TypeId id) noexcept; // forward declaration
} // namespace details
#endif
//! Helper template for implementing the @ref castWithoutAcquire function for one or more interfaces.
//!
//! @tparam T    The primary interface being implemented; its `cast`/`castWithoutAcquire` are the
//!              ones invoked by the public disambiguation methods below.
//! @tparam Rest Additional interfaces implemented by the object; each is consulted in order when a
//!              cast against @p T's inheritance chain fails.
template <typename T, typename... Rest>
struct ImplementsCastWithoutAcquire : public T, public Rest...
{
public:
    //! See @ref omni::core::IObject::cast.
    inline void* cast(omni::core::TypeId id) noexcept
    {
        // note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it
        // has zero-overhead.
        return static_cast<T*>(this)->cast(id);
    }
    //! See @ref omni::graph::exec::unstable::IBase_abi::castWithoutAcquire_abi.
    inline void* castWithoutAcquire(omni::core::TypeId id) noexcept
    {
        // note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it
        // has zero-overhead.
        return static_cast<T*>(this)->castWithoutAcquire(id);
    }
private:
    // given a type id, castImpl() checks if the type id matches T's typeid. if not, T's parent class type id is
    // checked. if T's parent class type id does not match, the grandparent class's type id is checked. this continues
    // until IObject's type id is checked.
    //
    // if no type id in T's inheritance chain matches, the next interface in Rest is checked.
    //
    // it's expected the compiler can optimize away the recursion
    template <typename U, typename... Args>
    inline void* castImpl(omni::core::TypeId id) noexcept
    {
        // omni::core::detail::cast will march down the inheritance chain
        void* obj = omni::core::detail::cast<U>(this, id);
        if (nullptr == obj)
        {
            // check the next class (inheritance chain) provided in the inheritance list
            return castImpl<Args...>(id);
        }
        return obj;
    }
    // given a type id, castWithoutAcquireImpl() checks if the type id matches T's typeid. if not, T's parent class type
    // id is checked. if T's parent class type id does not match, the grandparent class's type id is checked. this
    // continues until IObject's type id is checked.
    //
    // if no type id in T's inheritance chain matches, the next interface in Rest is checked.
    //
    // it's expected the compiler can optimize away the recursion
    template <typename U, typename... Args>
    inline void* castWithoutAcquireImpl(omni::core::TypeId id) noexcept
    {
        // details::castWithoutAcquire will march down the inheritance chain
        void* obj = details::castWithoutAcquire<U>(this, id);
        if (nullptr == obj)
        {
            // check the next class (inheritance chain) provided in the inheritance list
            return castWithoutAcquireImpl<Args...>(id);
        }
        return obj;
    }
    // this terminates walking across the types in the variadic template (no interface matched)
    template <int = 0>
    inline void* castImpl(omni::core::TypeId) noexcept
    {
        return nullptr;
    }
    // this terminates walking across the types in the variadic template (no interface matched)
    template <int = 0>
    inline void* castWithoutAcquireImpl(omni::core::TypeId) noexcept
    {
        return nullptr;
    }
protected:
    virtual ~ImplementsCastWithoutAcquire() noexcept = default;
    //! @copydoc omni::core::IObject_abi::cast_abi
    void* cast_abi(omni::core::TypeId id) noexcept override
    {
        return castImpl<T, Rest...>(id);
    }
    //! @copydoc omni::graph::exec::unstable::IBase_abi::castWithoutAcquire_abi
    void* castWithoutAcquire_abi(omni::core::TypeId id) noexcept override
    {
        return castWithoutAcquireImpl<T, Rest...>(id);
    }
};
//! Helper template for implementing one or more interfaces.
//!
//! Similar functionality as @ref omni::core::Implements but adds support for @ref ImplementsCastWithoutAcquire.
//!
//! Provides an intrusive reference count: objects start with a count of 1 (the creating
//! reference) and delete themselves when the count drops to 0.
template <typename T, typename... Rest>
struct Implements : public ImplementsCastWithoutAcquire<T, Rest...>
{
public:
    //! See @ref omni::core::IObject::acquire.
    inline void acquire() noexcept
    {
        // note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it
        // has zero-overhead.
        static_cast<T*>(this)->acquire();
    }
    //! See @ref omni::core::IObject::release.
    inline void release() noexcept
    {
        // note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it
        // has zero-overhead.
        static_cast<T*>(this)->release();
    }
protected:
    std::atomic<uint32_t> m_refCount{ 1 }; //!< Reference count.
    virtual ~Implements() noexcept = default;
    //! @copydoc omni::core::IObject_abi::acquire_abi()
    void acquire_abi() noexcept override
    {
        // relaxed ordering is sufficient: taking an additional reference imposes no ordering requirements
        m_refCount.fetch_add(1, std::memory_order_relaxed);
    }
    //! @copydoc omni::core::IObject_abi::release_abi()
    void release_abi() noexcept override
    {
        // fetch_sub returns the previous value, so (previous - 1) == 0 means this was the last reference.
        // the release decrement paired with the acquire fence below makes writes performed through
        // other references visible to the thread that runs the destructor.
        if (0 == m_refCount.fetch_sub(1, std::memory_order_release) - 1)
        {
            std::atomic_thread_fence(std::memory_order_acquire);
            delete this;
        }
    }
    //! Returns the number of different instances (this included) referencing the current object.
    uint32_t getUseCount_abi() noexcept override
    {
        // snapshot of the current count; may be stale by the time the caller inspects it
        return m_refCount;
    }
};
#ifndef DOXYGEN_BUILD
namespace details
{
//! Given a type, this function walks the inheritance chain for the type, checking if the id of the
//! type matches the given id. The recursion terminates at the @ref IBase specialization below.
//!
//! Implementation detail. Do not call.
template <typename T>
inline void* castWithoutAcquire(T* obj, omni::core::TypeId id) noexcept
{
    // either this level of the chain matches, or recurse into the parent type
    return (T::kTypeId == id) ? static_cast<void*>(obj) : castWithoutAcquire<typename T::BaseType>(obj, id);
}
//! Specialization of `castWithoutAcquire<T>(T*, TypeId)` for @ref omni::graph::exec::unstable::IBase. @ref
//! omni::graph::exec::unstable::IBase always terminates the recursive template since it does not have a base class.
//!
//! Implementation detail. Do not call.
template <>
inline void* castWithoutAcquire<IBase>(IBase* obj, omni::core::TypeId id) noexcept
{
    // end of the inheritance chain: match here or fail the cast
    return (IBase::kTypeId == id) ? static_cast<void*>(obj) : nullptr;
}
} // namespace details
#endif
//! Helper utility to access the number of different instances referencing the given object.
//!
//! Does not modify the reference count. A `nullptr` argument yields 0.
template <typename T>
inline uint32_t useCount(T* ptr) noexcept
{
    static_assert(std::is_base_of<IBase, T>::value, "useCount can only be used with classes that derive from IBase");
    return ptr ? ptr->getUseCount() : 0;
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IBase.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/IDef.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for all node definitions
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Since definitions can be shared by multiple nodes, and nodes can be executed in parallel, implementations of
//! this interface should expect its methods to be called in parallel.
//!
//! @note The methods below are thin inline wrappers over the corresponding `*_abi` entry points.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IDef_abi> : public omni::graph::exec::unstable::IDef_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IDef")
    //! Execute the node definition.
    //!
    //! See thread safety information in interface description.
    omni::graph::exec::unstable::Status execute(omni::graph::exec::unstable::ExecutionTask& info) noexcept;
    //! Provide runtime information about scheduling constraints particular task have
    //!
    //! The provided @ref omni::graph::exec::unstable::ExecutionTask can be used to determine the path of the current
    //! definition.
    //!
    //! The given task must not be @c nullptr.
    //!
    //! See thread safety information in interface description.
    omni::graph::exec::unstable::SchedulingInfo getSchedulingInfo(
        const omni::graph::exec::unstable::ExecutionTask& info) noexcept;
    //! Return unique definition identifier.
    //!
    //! See thread safety information in interface description.
    const omni::graph::exec::unstable::ConstName& getName() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// Thunk: forwards to the raw ABI entry point, passing the task by pointer.
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IDef_abi>::execute(
    omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
    return execute_abi(&info);
}
// Thunk: forwards to the raw ABI entry point, passing the task by pointer.
inline omni::graph::exec::unstable::SchedulingInfo omni::core::Generated<omni::graph::exec::unstable::IDef_abi>::getSchedulingInfo(
    const omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
    return getSchedulingInfo_abi(&info);
}
// Thunk: the ABI returns a pointer; dereference it into the reference the C++ API exposes.
inline const omni::graph::exec::unstable::ConstName& omni::core::Generated<omni::graph::exec::unstable::IDef_abi>::getName() noexcept
{
    return *(getName_abi());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundTask.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IBackgroundTask.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IBackgroundTask.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <type_traits>
#include <utility>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IBackgroundResult;
class IBackgroundResultWriter;
class IBackgroundTask;
class IBackgroundTask_abi;
//! Class representing a background task.
//!
//! Both methods below are single-shot: each returns an error if called more than once.
class IBackgroundTask_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IBackgroundTask")>
{
protected:
    //! Returns a @c std::future like object used to check if the background task has completed.
    //!
    //! An error is returned if this method is called more than once.
    //!
    //! This method is not thread safe.
    virtual OMNI_ATTR("throw_result") omni::core::Result
    getBackgroundResult_abi(OMNI_ATTR("not_null, throw_if_null, out, *return") IBackgroundResult** out) noexcept = 0;
    //! Completes async computation by setting a functor to the result as a shared state of the promise.
    //!
    //! It is the responsibility of the @ref omni::graph::exec::unstable::IBackgroundResult user to call @ref
    //! omni::graph::exec::unstable::IBackgroundResult::write() to invoke this given @ref
    //! omni::graph::exec::unstable::IBackgroundResultWriter. This allows the task's waiter to optionally not consume
    //! the result of the task.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given writer.
    //!
    //! An error is returned if this method is called more than once.
    //!
    //! This method is not thread safe.
    virtual OMNI_ATTR("no_api, throw_result") omni::core::Result
    setResultWriter_abi(OMNI_ATTR("not_null, throw_if_null") IBackgroundResultWriter* writer) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IBackgroundTask.
using BackgroundTaskPtr = omni::core::ObjectPtr<IBackgroundTask>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IBackgroundTask.gen.h>
//! @copydoc omni::graph::exec::unstable::IBackgroundTask_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IBackgroundTask
    : public omni::core::Generated<omni::graph::exec::unstable::IBackgroundTask_abi>
{
public:
    //! Marks the task as ready and sets a completion writer that can be invoked by @ref IBackgroundResult.
    //!
    //! The supplied function should have the signature of `Status(ExecutionTask*)`.
    //!
    //! Wraps the functor in an internal @ref IBackgroundResultWriter and forwards it to the ABI;
    //! like the ABI method, calling this more than once is an error.
    template <typename Fn>
    void setResultWriter(Fn&& fn);
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IBackgroundResult.h>
#include <omni/graph/exec/unstable/IBackgroundResultWriter.h>
//! Wraps @p fn in an @ref IBackgroundResultWriter and hands it to the ABI.
//!
//! @param fn Functor with signature `Status(ExecutionTask&)` invoked when the result is written.
//!
//! @note The functor is stored by value (decayed). Previously the wrapper stored `Fn`
//! directly, which for an lvalue argument deduces to a reference type, leaving the
//! heap-allocated writer holding a dangling reference once the caller's functor went
//! out of scope.
template <typename Fn>
void omni::graph::exec::unstable::IBackgroundTask::setResultWriter(Fn&& fn)
{
    // Local adapter implementing the ABI writer interface around the user functor.
    class Writer : public Implements<IBackgroundResultWriter>
    {
    public:
        explicit Writer(Fn&& fn) : m_fn(std::forward<Fn>(fn))
        {
        }
    protected:
        Status write_abi(ExecutionTask* info) noexcept override
        {
            return m_fn(*info);
        }
    private:
        std::decay_t<Fn> m_fn; // owned copy (or moved-in value) of the user functor
    };
    setResultWriter_abi(omni::core::steal(new Writer(std::forward<Fn>(fn))).get());
}
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IBackgroundTask.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/Span.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Span.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::Span.
#pragma once
#include <cstdint>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! A pointer along with the number of items the pointer points to.
//!
//! This object is ABI-safe: a raw pointer followed by a 64-bit count, standard layout,
//! 16 bytes total (enforced by the static_asserts in the constructor).
template <typename T>
class Span
{
public:
    //! Constructor. Wraps @p count items starting at @p buffer.
    Span(T* buffer, uint64_t count) noexcept : m_data(buffer), m_size(count)
    {
        static_assert(offsetof(Span<T>, m_data) == 0, "unexpected buffer offset");
        static_assert(offsetof(Span<T>, m_size) == 8, "unexpected count offset");
        static_assert(16 == sizeof(Span<T>), "Span is an unexpected size");
        static_assert(std::is_standard_layout<Span<T>>::value, "Span is expected to be abi safe");
    }
    //! Returns a pointer to the beginning of the array.
    T* begin() noexcept
    {
        return m_data;
    }
    //! Returns a const pointer to the beginning of the array.
    const T* begin() const noexcept
    {
        return m_data;
    }
    //! Returns a pointer to one past the end of the array.
    T* end() noexcept
    {
        return m_data + m_size;
    }
    //! Returns a const pointer to one past the end of the array.
    const T* end() const noexcept
    {
        return m_data + m_size;
    }
    //! Return @c true if the span is empty.
    bool empty() const noexcept
    {
        return m_size == 0;
    }
    //! Returns a reference to the first element.
    //!
    //! Calling when the span is empty is undefined behavior.
    T& front() noexcept
    {
        return m_data[0];
    }
    //! Returns a const reference to the first element.
    //!
    //! Calling when the span is empty is undefined behavior.
    const T& front() const noexcept
    {
        return m_data[0];
    }
    //! Returns a reference to the last element.
    //!
    //! Calling when the span is empty is undefined behavior.
    T& back() noexcept
    {
        return m_data[m_size - 1];
    }
    //! Returns a const reference to the last element.
    //!
    //! Calling when the span is empty is undefined behavior.
    const T& back() const noexcept
    {
        return m_data[m_size - 1];
    }
    //! Returns a pointer to the beginning of the array.
    T* data() noexcept
    {
        return m_data;
    }
    //! Returns a pointer to the beginning of the array.
    const T* data() const noexcept
    {
        return m_data;
    }
    //! Returns the number of items in the array.
    uint64_t size() const noexcept
    {
        return m_size;
    }
private:
    T* m_data; //!< First element (offset 0 for ABI stability)
    uint64_t m_size; //!< Element count (offset 8 for ABI stability)
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/ElementAt.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ElementAt.h
//!
//! @brief Defines helper classes to access iteratable items via an ABI.
#pragma once
#include <cstdint>
#include <iterator>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
namespace detail
{
//! Provides iterator access to an interface that defines per-element access.
//!
//! Use this object to wrap an interface that define random element access. The resulting wrapper object can be passed
//! to any algorithm that can iterate over an iterable object (e.g. C++'s built-in range-based `for`).
//!
//! @tparam OwnerType The interface type (e.g. `IMyArray`).
//!
//! @tparam ValueType The type of the value returned from the getter.
//!
//! @tparam GetterType A struct that defines a static `getAt(OwnerType* owner, uint64_t index, ValueType* out)` method.
//! This method is used to access the element at the given index. The struct must also define a
//! static `getCount(OwnerType*)` method which returns the number of items to iterate over.
template <typename OwnerType, typename ValueType, typename GetterType>
struct ElementAt
{
    //! Iterator pointing to an element in the iterable range.
    //!
    //! Forward iterator that caches a *copy* of the current element (fetched via
    //! `GetterType::getAt`) and hands out references/pointers to that cached copy.
    struct Iterator
    {
        //! Type of the iterator
        using iterator_category = std::forward_iterator_tag;
        //! Type of the value to which the iterator points.
        using value_type = ValueType;
        //! Pointer to the type of the value to which the iterator points.
        using pointer = value_type*;
        //! Reference to the type of the value to which the iterator points.
        using reference = value_type&;
        //! Constructor.  Immediately fetches the element at @p index_ (if in range).
        Iterator(OwnerType* owner_, uint64_t index_, uint64_t count_)
            : m_owner(owner_), m_index(index_), m_count(count_)
        {
            _get();
        }
        //! Dereference operator.  Returns the cached element; dereferencing end() is undefined.
        reference operator*()
        {
            return m_element;
        }
        //! Dereference operator.
        pointer operator->()
        {
            return &m_element;
        }
        //! Move to the next item in the container.
        Iterator& operator++() noexcept
        {
            m_index++;
            _get();
            return *this;
        }
        //! Move to the next item in the container (post-increment; returns a copy of the prior state).
        Iterator operator++(int) noexcept
        {
            Iterator tmp = *this;
            ++(*this);
            return tmp;
        }
        //! Check if the iterators are equal.  Only owner and index participate; the cached element does not.
        friend bool operator==(const Iterator& a, const Iterator& b) noexcept
        {
            return ((a.m_owner == b.m_owner) && (a.m_index == b.m_index));
        };
        //! Check if the iterators are not equal.
        friend bool operator!=(const Iterator& a, const Iterator& b) noexcept
        {
            return ((a.m_owner != b.m_owner) || (a.m_index != b.m_index));
        };
    private:
        // Refresh the cached element; past-the-end iterators leave m_element untouched
        // (and initially default/uninitialized), which is why end() must not be dereferenced.
        void _get()
        {
            if (m_index < m_count)
            {
                GetterType::getAt(m_owner, m_index, &m_element);
            }
        }
        OwnerType* m_owner; //!< Interface supplying elements; not owned
        ValueType m_element; //!< Cached copy of the element at m_index
        uint64_t m_index; //!< Current position
        uint64_t m_count; //!< Total element count captured at iterator creation
    };
    //! Constructor
    ElementAt(OwnerType* owner) noexcept : m_owner(owner)
    {
    }
    //! Returns an iterator to the first element.
    Iterator begin() const noexcept
    {
        return Iterator(m_owner, 0, GetterType::getCount(m_owner));
    }
    //! Returns an invalid iterator past the last element.
    Iterator end() const noexcept
    {
        auto count = GetterType::getCount(m_owner);
        return Iterator(m_owner, count, count);
    }
    //! Returns element count
    uint64_t getCount() const noexcept
    {
        return GetterType::getCount(m_owner);
    }
private:
    OwnerType* m_owner; //!< Interface being wrapped; not owned
};
} // namespace detail
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/RaceConditionFinder.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file RaceConditionFinder.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::RaceConditionFinder.
#pragma once
#include <carb/Defines.h>
#include <atomic>
#include <thread>
#if CARB_PLATFORM_WINDOWS
#else
# include <signal.h>
#endif
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Helper class for detecting race conditions.
//!
//! This type of a "fake lock" has been known under many names. The goal is to create a "critical section"
//! without enforcing synchronization. If more than one thread falls into this "critical section" it means
//! we haven't managed (at the orchestration level) guarantee sequential execution of particular code.
//!
//! The RC finder is used very similar to a lock, i.e. you allocate a shared RC finder and enter
//! the "critical section" using Scope RAII object. When more than one thread enters this section,
//! we will make them spin forever and issue a debug break from the first thread that started the section.
//! This allows to catch all the threads and easily debug by seeing all the callstacks and states.
//!
//! RC finder supports recursive execution.
//!
//! This is a debugging object and shouldn't be used in released product. Without debugged attached, application will
//! simply crash.
class RaceConditionFinder
{
public:
    //! Create scope around the fake "critical section".
    //!
    //! RAII guard: construction attempts to claim the section, destruction releases it
    //! (or breaks into the debugger if a race was detected while it was held).
    class Scope
    {
    public:
        //! Construct the scope with a valid shared finder.
        Scope(RaceConditionFinder& finder)
            : m_sharedFinder(finder),
              m_thisThreadId(std::this_thread::get_id()),
              m_originalThreadId(m_sharedFinder.m_currentThread)
        {
            // handle recursive code paths: re-entry by the owning thread is allowed
            bool acquiredSuccessfully = (m_originalThreadId == m_thisThreadId);
            if (!acquiredSuccessfully)
            {
                // claim the section only if no thread currently holds it (empty id)
                std::thread::id emptyId;
                acquiredSuccessfully = m_sharedFinder.m_currentThread.compare_exchange_strong(emptyId, m_thisThreadId);
            }
            // infinite loop in here and let the other thread complete and issue debugger break in the destructor
            if (!acquiredSuccessfully)
            {
                m_sharedFinder.m_raceDetected = true;
                while (true)
                {
                    std::this_thread::yield();
                }
            }
        }
        //! Issue a breakpoint if race condition is detected, otherwise release the section.
        ~Scope()
        {
            if (m_sharedFinder.m_raceDetected)
            {
#if CARB_PLATFORM_WINDOWS
                __debugbreak();
#else
                raise(SIGTRAP);
#endif
            }
            else
            {
                // restore the previous owner (empty id, or this thread's id for recursive scopes)
                bool resetSuccessfully =
                    m_sharedFinder.m_currentThread.compare_exchange_strong(m_thisThreadId, m_originalThreadId);
                // this shouldn't be possible; treat it as a detected race and spin like the constructor does
                if (!resetSuccessfully)
                {
                    m_sharedFinder.m_raceDetected = true;
                    while (true)
                    {
                        std::this_thread::yield();
                    }
                }
            }
        }
    private:
        RaceConditionFinder& m_sharedFinder; //!< Shared finder object used to communicate state across threads
        std::thread::id m_thisThreadId; //!< Captured thread ID at the construction of the object
        std::thread::id m_originalThreadId; //!< Captured thread ID at the construction time from shared finder
    };
private:
    std::atomic<std::thread::id> m_currentThread; //!< Thread ID currently holding critical section, or empty ID
    std::atomic<bool> m_raceDetected{ false }; //!< Was race condition detected
};
}
}
}
}
|
omniverse-code/kit/include/omni/graph/exec/unstable/IPartitionPass.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPartitionPass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPartitionPass.
#pragma once
#include <omni/graph/exec/unstable/IPass.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilder;
class INode;
class ITopology;
class IPartitionPass;
class IPartitionPass_abi;
//! Base class for graph partitioning passes.
//!
//! Partition passes are typically run just after population passes and only on newly modified
//! @ref omni::graph::exec::unstable::INodeGraphDef objects. The job of a partition pass is to recognize patterns in the
//! newly populated graph and replace them with a new definition or augment existing one.
//!
//! Partition passes can only mutate the graph from the @ref omni::graph::exec::unstable::IPartitionPass::commit method
//! using provided @ref omni::graph::exec::unstable::IGraphBuilder. This will guarantee that the rest of the pipeline
//! is aware of changes made to the graph and avoid potential threading issues.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
//! ABI for partition passes.  The pipeline drives the methods in order:
//! initialize() once, run() per node (possibly concurrently), then commit() serially.
class IPartitionPass_abi : public omni::core::Inherits<IPass, OMNI_TYPE_ID("omni.graph.exec.unstable.IPartitionPass")>
{
protected:
    //! Call from pass pipeline to initialize the pass for @p topology.
    //!
    //! This interface method implementation can't mutate given @p topology. Multiple passes can run concurrently on it.
    //!
    //! Returns True if initialization was successful and pipeline should issue calls to run and commit.
    //! Otherwise this pass will be destroyed and won't participate in partitioning @p topology.
    virtual bool initialize_abi(OMNI_ATTR("not_null, throw_if_null") ITopology* topology) noexcept = 0;
    //! Call from pass pipeline to discover nodes requiring partitioning.
    //!
    //! No topology changes are permitted at this point. Multiple passes will get a chance to receive this
    //! notification.
    //!
    //! Call to this method comes from graph traversal that may run multiple passes concurrently.
    virtual void run_abi(OMNI_ATTR("not_null, throw_if_null") INode* node) noexcept = 0;
    //! Call to verify generated partitions and commit new definition/s replacing discovered partitions.
    //!
    //! Commit of partitions is done serially and in the priority order of the pass. Passes with higher order will get
    //! the chance first. This is the only partition pass method that can mutate the graph.
    virtual void commit_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilder* builder) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IPartitionPass.
using PartitionPassPtr = omni::core::ObjectPtr<IPartitionPass>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPartitionPass.gen.h>
//! @copydoc omni::graph::exec::unstable::IPartitionPass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
//!
//! Concrete API wrapper generated from the ABI declaration; adds no members of its own.
class omni::graph::exec::unstable::IPartitionPass
    : public omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/ITopology.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPartitionPass.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/Assert.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Assert.h
//!
//! @brief Defines macros for assertions.
#pragma once
#include <carb/extras/Debugging.h>
#include <omni/core/Assert.h>
#include <omni/core/ResultError.h>
#include <omni/graph/exec/unstable/IBase.h>
//! Debug build assertion.
//!
//! Forwards to @ref OMNI_ASSERT, passing any extra arguments through unchanged.
#define OMNI_GRAPH_EXEC_ASSERT(cond, ...) OMNI_ASSERT(cond, ##__VA_ARGS__)
#ifndef DOXYGEN_BUILD
# define OMNI_GRAPH_EXEC_BREAK_ON_ERROR
#endif
#if defined(OMNI_GRAPH_EXEC_BREAK_ON_ERROR) || defined(DOXYGEN_BUILD)
//! Returns the given @ref omni::core::Result. If a debugger is attached, it will break.
//!
//! The body is wrapped in the do/while(0) idiom so the macro expands to a single statement.
//! The previous two-statement expansion was unsafe in unbraced conditionals:
//! `if (cond) OMNI_GRAPH_EXEC_RETURN_ERROR(e);` would break conditionally but return
//! unconditionally.
#    define OMNI_GRAPH_EXEC_RETURN_ERROR(e_)                                                                           \
        do                                                                                                             \
        {                                                                                                              \
            carb::extras::debuggerBreak();                                                                             \
            return e_;                                                                                                 \
        } while (0)
#else
//! Returns the given @ref omni::core::Result.
#    define OMNI_GRAPH_EXEC_RETURN_ERROR(e_) return e_;
#endif
//! When authoring ABI methods, use this macro to convert exceptions to @ref omni::core::Result codes.
//!
//! Place immediately after the `try` block of an ABI method: a thrown @ref omni::core::ResultError
//! is unwrapped to its stored result code, while any other exception maps to
//! @ref omni::core::kResultFail.
#define OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()                                                                          \
    catch (const omni::core::ResultError& e_)                                                                          \
    {                                                                                                                  \
        OMNI_GRAPH_EXEC_RETURN_ERROR(e_.getResult());                                                                  \
    }                                                                                                                  \
    catch (...)                                                                                                        \
    {                                                                                                                  \
        OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultFail);                                                         \
    }
//! Casts @p obj_ to an object of type @p type_. If the cast fails, @ref omni::core::kResultNoInterface
//! is returned from the calling function (no exception is thrown; the old comment saying an exception
//! is thrown was incorrect).
//!
//! The resulting pointer is stored in @p var_.
//!
//! @p var_ is deliberately declared outside of the do/while below so that it remains in scope for the
//! code following the macro invocation.
#define OMNI_GRAPH_EXEC_CAST_OR_RETURN(var_, type_, obj_)                                                              \
    auto var_ = omni::graph::exec::unstable::cast<type_>(obj_);                                                        \
    do                                                                                                                 \
    {                                                                                                                  \
        if (!var_)                                                                                                     \
        {                                                                                                              \
            OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultNoInterface);                                              \
        }                                                                                                              \
    } while (0)
|
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDefDebug.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Interface containing debugging methods for @ref omni::graph::exec::unstable::INodeGraphDefDebug.
//!
//! Implementation of this interface is optional.
template <>
class omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>
    : public omni::graph::exec::unstable::INodeGraphDefDebug_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeGraphDefDebug")

    //! Returns the current execution count. A value of 0 means the graph is not executing.
    uint64_t getExecutionCount() noexcept;

    //! Increments the execution count.
    void incrementExecutionCount() noexcept;

    //! Decrements the execution count. It is undefined behavior to call decrement more times than increment.
    void decrementExecutionCount() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// NOTE(review): this file is produced by the omni.bind generator; the thunks below simply forward
// the public API to the corresponding *_abi virtual methods. Edits here will be lost on
// regeneration — change the generator inputs instead.
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::getExecutionCount() noexcept
{
    return getExecutionCount_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::incrementExecutionCount() noexcept
{
    incrementExecutionCount_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::decrementExecutionCount() noexcept
{
    decrementExecutionCount_abi();
}
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutorFactory.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ExecutorFactory.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::ExecutorFactory
#pragma once
#include <omni/graph/exec/unstable/IExecutor.h>
#include <functional>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class ExecutionTask;
class ITopology;

//! Factory owned by node graph definition used to instantiate executor to generate the work
//!
//! The callable receives an @ref ITopology and the @ref ExecutionTask driving the instantiation,
//! and returns a new @ref IExecutor instance.
//!
//! May throw.
using ExecutorFactory =
    std::function<omni::core::ObjectPtr<IExecutor>(omni::core::ObjectParam<ITopology>, const ExecutionTask&)>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/INode.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file INode.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INode.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IDef;
class INode_abi;
class INode;
class INodeDef;
class INodeGraphDef;
class ITopology;
//! Represents work in a graph. Nodes point to a shared execution definition to state the actual work.
//!
//! @ref omni::graph::exec::unstable::INode is the main structural component used to build a graph's topology. @ref
//! omni::graph::exec::unstable::INode stores edges to *parents* (i.e. predecessors) and *children* (i.e. successors).
//! These edges set an ordering between nodes. See @ref omni::graph::exec::unstable::INode::getParents() and @ref
//! omni::graph::exec::unstable::INode::getChildren() respectively.
//!
//! A node represents work to be performed. The description of the work to be performed is stored in a *definition*
//! (i.e. @ref omni::graph::exec::unstable::IDef). Each node wishing to perform work points to a definition (see @ref
//! omni::graph::exec::unstable::INode::getDef()).
//!
//! The definition to which a node points can be one of two types. The first type, @ref
//! omni::graph::exec::unstable::INodeDef, defines work opaquely (i.e. EF is unable to view the work definition and
//! potentially optimize it). The second type, @ref omni::graph::exec::unstable::INodeGraphDef, defines work with a
//! graph. This last representation is the most powerful as it allows for both *extensibility* and *composability* in EF.
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//! :align: center
//!
//! @endrst
//!
//! Above, we see that nodes point to graph definitions, which contain other nodes that point to other graph
//! definitions. This structure of graphs pointing to other graphs is where EF gets its *graph of graphs* name.
//!
//! Not all nodes will point to a definition. For example, the @rstref{root node <ef_root_node>} in each graph
//! definition will not point to a definition.
//!
//! A node is always part of a graph definition and the graph definition's executor is responsible for orchestrating and
//! generating work to the scheduler.
//!
//! Nodes within a graph definition are assigned a unique index, between zero and the number of nodes in the
//! definition. This index is often used as a lookup into transient arrays used to store state during graph traversals.
//! See @ref omni::graph::exec::unstable::INode::getIndexInTopology().
//!
//! Nodes have a notion of validity. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! @ref omni::graph::exec::unstable::INode does not contain methods for either settings the node's definition or
//! connecting nodes to each other. This functionality is reserved for @ref omni::graph::exec::unstable::IGraphBuilder.
//! See @rstref{Graph Construction <ef_pass_concepts>} for details.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for a guide on how this object relates to other objects in the
//! Execution Framework.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Users may wish to implement this interface to store meaningful authoring level data in EF. For example, OmniGraph
//! uses an implementation of this node to store graph instancing information. See @ref
//! omni::graph::exec::unstable::Node for a concrete implementation of this interface suitable for sub-classing.
class INode_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.INode")>
{
public:
    using NodeArray = Span<INode* const>; //!< Stores the list of parents and children.

protected:
    //! Access topology owning this node
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;

    //! Access node's unique identifier name.
    virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0;

    //! Access node's unique index within the owning topology. Index will be always smaller than topology size.
    virtual NodeIndexInTopology getIndexInTopology_abi() noexcept = 0;

    //! Access parents (direct predecessors).
    virtual Span<INode* const> getParents_abi() noexcept = 0;

    //! Access children (direct successors).
    virtual Span<INode* const> getChildren_abi() noexcept = 0;

    //! Return number of parents that cause cycles within the graph during traversal over this node.
    virtual uint32_t getCycleParentCount_abi() noexcept = 0;

    //! Check if topology/connectivity of nodes is valid within current topology version.
    //!
    //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
    virtual bool isValidTopology_abi() noexcept = 0;

    //! Make topology valid for current topology version. Drop all the connections if topology changed.
    //!
    //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
    virtual void validateOrResetTopology_abi() noexcept = 0;

    //! Access base node definition (can be empty).
    //!
    //! When you wish to determine if the attached definition is either opaque or a graph, consider calling @ref
    //! omni::graph::exec::unstable::INode::getNodeDef() or @ref omni::graph::exec::unstable::INode::getNodeGraphDef()
    //! rather than this method.
    //!
    //! The returned @ref omni::graph::exec::unstable::IDef will *not* have @ref omni::core::IObject::acquire() called
    //! before being returned.
    virtual OMNI_ATTR("no_acquire") IDef* getDef_abi() noexcept = 0;

    //! Access node definition (can be empty).
    //!
    //! If the returned pointer is @c nullptr, either the definition does not implement @ref
    //! omni::graph::exec::unstable::INodeDef or there is no definition attached to the node.
    //!
    //! The returned @ref omni::graph::exec::unstable::INodeDef will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    //!
    //! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
    //! omni::graph::exec::unstable::INode::getNodeGraphDef().
    virtual OMNI_ATTR("no_acquire") INodeDef* getNodeDef_abi() noexcept = 0;

    //! Access node's graph definition (can be empty)
    //!
    //! The returned graph definition pointer is the graph definition which defines the work this node represents. The
    //! returned pointer **is not** the graph definition that contains this node.
    //!
    //! If the returned pointer is @c nullptr, either the definition does not implement @ref
    //! omni::graph::exec::unstable::INodeGraphDef or there is no definition attached to the node.
    //!
    //! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    //!
    //! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
    //! omni::graph::exec::unstable::INode::getNodeDef().
    virtual OMNI_ATTR("no_acquire") INodeGraphDef* getNodeGraphDef_abi() noexcept = 0;
};

//! Smart pointer managing an instance of @ref INode.
using NodePtr = omni::core::ObjectPtr<INode>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INode.gen.h>
//! @copydoc omni::graph::exec::unstable::INode_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INode : public omni::core::Generated<omni::graph::exec::unstable::INode_abi>
{
public:
    //! Returns the root of the graph definition of which this node is a part.
    //!
    //! Implemented inline below in terms of @ref ITopology::getRoot().
    inline INode* getRoot() noexcept;

    //! Check if this node is the root of the graph/topology.
    inline bool isRoot() noexcept;

    //! Check if a given node is a parent of this node (i.e. a direct predecessor).
    inline bool hasParent(omni::core::ObjectParam<INode> parent) noexcept;

    //! Check if a given node is a child of this node (i.e. a direct successor).
    inline bool hasChild(omni::core::ObjectParam<INode> child) noexcept;
};
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/ITopology.h>
inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::INode::getRoot() noexcept
{
    // The owning topology stores the root node; delegate to it.
    auto* owningTopology = getTopology();
    return owningTopology->getRoot();
}
inline bool omni::graph::exec::unstable::INode::isRoot() noexcept
{
    // A node is the root exactly when the topology's root is this node.
    return this == getRoot();
}
//! Check if a given node is a parent of this node.
//!
//! Linear scan over the parent span. The previous implementation used std::find but this header
//! never includes <algorithm>, relying on a transitive include — fragile across standard-library
//! implementations. The explicit loop removes that hidden dependency while preserving behavior.
inline bool omni::graph::exec::unstable::INode::hasParent(omni::core::ObjectParam<INode> parent) noexcept
{
    for (INode* candidate : getParents())
    {
        if (candidate == parent.get())
        {
            return true;
        }
    }
    return false;
}
//! Check if a given node is a child of this node.
//!
//! Linear scan over the child span. The previous implementation used std::find but this header
//! never includes <algorithm>, relying on a transitive include — fragile across standard-library
//! implementations. The explicit loop removes that hidden dependency while preserving behavior.
inline bool omni::graph::exec::unstable::INode::hasChild(omni::core::ObjectParam<INode> child) noexcept
{
    for (INode* candidate : getChildren())
    {
        if (candidate == child.get())
        {
            return true;
        }
    }
    return false;
}
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INode.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/GraphBuilderContext.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file GraphBuilderContext.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilderContext.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/IGraphBuilderContext.h>
#include <omni/graph/exec/unstable/IPassPipeline.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IGraphBuilderContext
template <typename... Bases>
class GraphBuilderContextT : public Implements<Bases...>
{
public:
    //! Construct graph builder context for a given @ref IGraph with a given pass transformation pipeline.
    //!
    //! Both arguments must be non-null (throws via OMNI_THROW_IF_ARG_NULL otherwise).
    //!
    //! May throw.
    static omni::core::ObjectPtr<GraphBuilderContextT> create(omni::core::ObjectParam<IGraph> graph,
                                                              omni::core::ObjectParam<IPassPipeline> passPipeline)
    {
        OMNI_THROW_IF_ARG_NULL(graph);
        OMNI_THROW_IF_ARG_NULL(passPipeline);
        return omni::core::steal(new GraphBuilderContextT(graph.get(), passPipeline.get()));
    }

protected:
    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::getConstructionStamp_abi
    Stamp getConstructionStamp_abi() noexcept override
    {
        return m_constructionStamp;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::getGraph_abi
    IGraph* getGraph_abi() noexcept override
    {
        // Raw pointer returned without acquire; m_owner is stored non-owning (see constructor).
        return m_owner;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::report_abi
    void report_abi(const char* diagnose) noexcept override
    {
        // Default implementation doesn't report anything
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::runTransformations_abi
    void runTransformations_abi(INodeGraphDef* nodeGraphDef) noexcept override
    {
        m_pipeline->execute(this, nodeGraphDef);
        // Bump the construction version so consumers can detect that a pipeline run happened.
        m_constructionStamp.next();
    }

    //! Constructor
    //!
    //! The graph pointer is stored raw/non-owning (the graph must outlive this context —
    //! TODO confirm lifetime contract at call sites); the pipeline is held with kBorrow
    //! (a reference is taken via @ref omni::core::ObjectPtr).
    GraphBuilderContextT(IGraph* graph, IPassPipeline* pipeline)
        : m_owner{ graph }, m_pipeline{ pipeline, omni::core::kBorrow }
    {
    }

private:
    IGraph* m_owner; //!< Owner of all graphs this context touches
    PassPipelinePtr m_pipeline; //!< Graph transformations pipeline used in this context
    Stamp m_constructionStamp; //!< Construction version incremented after pipeline run.
};

//! Core GraphBuilderContext implementation for @ref omni::graph::exec::unstable::IGraphBuilderContext
using GraphBuilderContext = GraphBuilderContextT<IGraphBuilderContext>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionContext.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ExecutionContext.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ExecutionContext.
#pragma once
#include <carb/thread/RecursiveSharedMutex.h>
#include <carb/thread/SharedMutex.h>
#include <carb/thread/Spinlock.h>
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/ExecutionPath.h>
#include <omni/graph/exec/unstable/Executor.h>
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/INodeGraphDefDebug.h>
#include <omni/graph/exec/unstable/SmallVector.h>
#include <omni/graph/exec/unstable/Traversal.h>
#include <thread>
#include <unordered_map>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Implementation details for omni::graph::exec. Items in this namespace should not be relied on outside of the API.
namespace detail
{
//! Utility class for discovering all execution paths for a given definition
//!
//! Searches are cached until topology of execution graph changes.
//! Invalidation of the cache happens lazily upon request.
//!
//! This class is thread-safe and can be utilized recurrently.
class ExecutionPathCache
{
public:
    //! Default constructor is removed
    ExecutionPathCache() = delete;

    //! Constructor. Stores a reference to the graph; the graph must outlive this cache.
    explicit ExecutionPathCache(IGraph& graph) noexcept : m_graph(graph)
    {
    }

    //! Call given function for every execution path that points to given node or node graph definition
    //!
    //! Function should have the signature of `void(const ExecutionPath&)`
    //!
    //! @p key is either an @ref IDef* (pointer identity match) or a @ref ConstName (name match);
    //! the overloads of _isMatch/_getCache below select the behavior and backing cache.
    template <typename Key>
    void applyOnEach(const Key& key, IApplyOnEachFunction& applyFn)
    {
        if (m_graph.inBuild())
        {
            // traversing the entire graph while building it isn't allowed since multiple threads may be building it
            OMNI_GRAPH_EXEC_ASSERT(!m_graph.inBuild());
            return;
        }

        if (!m_graph.getTopology()->isValid())
        {
            return;
        }

        // Depth-first discovery: walks a graph definition, recursing into nested node graph
        // definitions, collecting (and immediately invoking applyFn on) every path whose node's
        // definition matches the key. Passed to itself via recursionFn to allow recursion from a
        // lambda.
        auto discoverAndApplyOnNodesWithDefinitionFn = [this, &key, &applyFn](
                                                           const ExecutionPath& upstreamPath, INodeGraphDef& graph,
                                                           Paths& collectedPaths, auto recursionFn) -> void
        {
            traversal_dfs<VisitFirst>(
                graph.getRoot(),
                [this, &upstreamPath, &key, &recursionFn, &applyFn, &collectedPaths](auto info, INode* prev, INode* curr)
                {
                    auto currNodeGraph = curr->getNodeGraphDef();
                    if (currNodeGraph)
                    {
                        ExecutionPath newUpstreamPath(upstreamPath, curr);
                        recursionFn(newUpstreamPath, *currNodeGraph, collectedPaths, recursionFn);
                    }

                    auto def = curr->getDef();
                    if (def && _isMatch(key, def))
                    {
                        collectedPaths.emplace_back(upstreamPath, curr);
                        applyFn.invoke(collectedPaths.back());
                    }
                    info.continueVisit(curr);
                });
        };

        // check if this cache is in-sync with the current topology. since we can run this method in parallel, we
        // need a read lock to m_mutex to safely read m_topologyStamp
        std::shared_lock<MutexType> readLock(m_mutex);

        auto topologyStamp = *m_graph.getGlobalTopologyStamp();
        if (!m_topologyStamp.inSync(topologyStamp))
        {
            // cache is out-of-sync. upgrade to a write lock.
            readLock.unlock();
            {
                // here we once again check to see if the cache is in-sync since another thread may have beat this
                // thread to the write lock and brought the cache into sync.
                std::lock_guard<MutexType> writeLock(m_mutex);
                if (m_topologyStamp.makeSync(topologyStamp))
                {
                    // we're the thread that got to the write lock first. its our job to clear the cache.
                    m_defCache.clear();
                    m_nameCache.clear();
                }
            }

            // grab the read lock again so we can safely read the cache
            readLock.lock();
        }

        auto& cache = _getCache(key);

        auto findIt = cache.find(key);
        if (findIt != cache.end())
        {
            // We've seen this name before. Make a copy of the paths so we can release the readLock. This is
            // required because an invocation can result in re-entering and taking the writeLock.
            auto pathsCopy = findIt->second;
            readLock.unlock();
            for (ExecutionPath& path : pathsCopy)
            {
                applyFn.invoke(path);
            }
        }
        else
        {
            // Release readLock because apply below can result in re-entry of this function
            readLock.unlock();

            // either the key wasn't found or we're building the graph
            Paths paths;
            discoverAndApplyOnNodesWithDefinitionFn(
                ExecutionPath::getEmpty(), *m_graph.getNodeGraphDef(), paths, discoverAndApplyOnNodesWithDefinitionFn);

            // Insert only once we collected all the paths. Some other thread may be looking for this definition at
            // the same time.
            std::lock_guard<MutexType> writeLock(m_mutex);
            cache.emplace(key, std::move(paths));
        }
    }

private:
    // Key matching: by definition name...
    bool _isMatch(const ConstName& desired, IDef* candidate)
    {
        return (desired == candidate->getName());
    }

    // ...or by definition pointer identity.
    bool _isMatch(IDef* desired, IDef* candidate)
    {
        return (desired == candidate);
    }

    // Cache selection mirrors the two key types above.
    auto& _getCache(const ConstName&)
    {
        return m_nameCache;
    }

    auto& _getCache(IDef*)
    {
        return m_defCache;
    }

    using Paths = SmallVector<ExecutionPath, 2>;
    using DefCache = std::unordered_map<IDef*, Paths>;
    using NameCache = std::unordered_map<ConstName, Paths>;
    using MutexType = carb::thread::recursive_shared_mutex;

    IGraph& m_graph; //!< Execution graph to search for execution paths
    DefCache m_defCache; //!< Storage for already discovered paths (keyed on def ptr)
    NameCache m_nameCache; //!< Storage for already discovered paths (keyed on def name)
    MutexType m_mutex; //!< Mutex to allow concurrent utilization of cache and serialized insertion
    SyncStamp m_topologyStamp; //!< Topology of execution graph this cache is valid for
};
} // namespace detail
//! @copydoc omni::graph::exec::unstable::IExecutionContext
template <typename StorageType, typename ParentInterface = IExecutionContext>
class ExecutionContext : public Implements<ParentInterface>
{
protected:
    //! Helper RAII object controlling in execution flag.
    //!
    //! Registers the calling thread (with a per-thread recursion counter) for the lifetime of the
    //! object so that inExecute()/isExecutingThread() reflect nested executions correctly.
    class ScopedInExecute
    {
    public:
        //! Constructor
        ScopedInExecute(ExecutionContext& context) : m_context(context)
        {
            std::lock_guard<carb::thread::Spinlock> lock(m_context.m_threadIdSpinlock);
            ++m_context.m_contextThreadIds[std::this_thread::get_id()];
        }

        //! Destructor
        ~ScopedInExecute()
        {
            std::lock_guard<carb::thread::Spinlock> lock(m_context.m_threadIdSpinlock);
            --m_context.m_contextThreadIds[std::this_thread::get_id()];

            // drop the entry entirely once this thread's outermost execution completes
            if (m_context.m_contextThreadIds[std::this_thread::get_id()] == 0)
            {
                m_context.m_contextThreadIds.erase(std::this_thread::get_id());
            }
        }

    private:
        ExecutionContext& m_context; //!< Context in execution
    };

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::getExecutionStamp_abi
    Stamp getExecutionStamp_abi() noexcept override
    {
        return m_executionStamp;
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::inExecute_abi
    bool inExecute_abi() noexcept override
    {
        std::lock_guard<carb::thread::Spinlock> lock(m_threadIdSpinlock);
        return !m_contextThreadIds.empty();
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::isExecutingThread_abi
    bool isExecutingThread_abi() noexcept override
    {
        std::lock_guard<carb::thread::Spinlock> lock(m_threadIdSpinlock);
        return m_contextThreadIds.find(std::this_thread::get_id()) != m_contextThreadIds.end();
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::execute_abi
    Status execute_abi() noexcept override
    {
        // lazily (re)initialize per-graph state when the topology changed since the last run
        if (!m_initStamp.inSync(m_graph->getTopology()->getStamp()))
        {
            this->initialize();
        }

        m_executionStamp = _getNextGlobalExecutionStamp();
        ScopedInExecute scopedInExecute(*this);
        ScopedExecutionDebug scopedDebug{ m_graph->getNodeGraphDef() };

        return getCurrentThread()->executeGraph(m_graph, this);
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::executeNode_abi
    Status executeNode_abi(const ExecutionPath* path, INode* node) noexcept override
    {
        if (!m_initStamp.inSync(m_graph->getTopology()->getStamp()))
        {
            this->initialize();
        }

        m_executionStamp = _getNextGlobalExecutionStamp();
        ScopedInExecute scopedInExecute(*this);
        ScopedExecutionDebug scopedDebug{ m_graph->getNodeGraphDef() };

        auto def = node->getDef();
        if (def)
        {
            // execute just this node through a temporary fallback executor instead of the full graph
            ExecutionTask newTask{ this, node, *path };
            auto tmpExecutor = ExecutorFallback::create(node->getTopology(), newTask);
            return newTask.execute(tmpExecutor);
        }
        else
        {
            // node has no definition attached; nothing can be executed
            return Status::eFailure;
        }
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::initialize_abi
    omni::core::Result initialize_abi() noexcept override
    {
        try
        {
            if (!m_initStamp.makeSync(m_graph->getTopology()->getStamp()))
            {
                return omni::core::kResultSuccess;
            }

            // depth-first walk over every (nested) node graph definition, initializing each one's
            // state. note: the lambda's `info` traversal parameter shadows the outer ExecutionTask.
            auto traversalFn = [this](INodeGraphDef* nodeGraphDef, const ExecutionPath& path, auto& recursionFn) -> void
            {
                ExecutionTask info(this, nodeGraphDef->getRoot(), path);
                nodeGraphDef->initializeState(info); // may throw

                traversal_dfs<VisitFirst>(nodeGraphDef->getRoot(),
                                          [&path, &recursionFn, nodeGraphDef](auto info, INode* prev, INode* curr)
                                          {
                                              auto currNodeGraphDef = curr->getNodeGraphDef();
                                              if (currNodeGraphDef)
                                              {
                                                  ExecutionPath newPath{ path, curr }; // may throw
                                                  recursionFn(currNodeGraphDef, newPath, recursionFn);
                                              }
                                              info.continueVisit(curr);
                                          });
            };

            ExecutionPath path;
            traversalFn(m_graph->getNodeGraphDef(), path, traversalFn); // may throw

            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::getStateInfo_abi
    virtual omni::core::Result getStateInfo_abi(const ExecutionPath* path,
                                                INode* node,
                                                IExecutionStateInfo** out) noexcept override
    {
        try
        {
            *out = m_storage.getStateInfo(*path, node); // may throw
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::getNodeData_abi
    virtual omni::core::Result getNodeData_abi(const ExecutionPath* path,
                                               INode* node,
                                               NodeDataKey key,
                                               omni::core::TypeId* outTypeId,
                                               void** outPtr,
                                               uint64_t* outItemSize,
                                               uint64_t* outBufferSize) noexcept override
    {
        try
        {
            m_storage.getNodeData(*path, node, key, outTypeId, outPtr, outItemSize, outBufferSize); // may throw
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::setNodeData_abi
    virtual omni::core::Result setNodeData_abi(const ExecutionPath* path,
                                               INode* node,
                                               NodeDataKey key,
                                               omni::core::TypeId typeId,
                                               void* data,
                                               uint64_t dataByteCount,
                                               uint64_t dataItemCount,
                                               NodeDataDeleterFn* deleter) noexcept override
    {
        try
        {
            m_storage.setNodeData(*path, node, key, typeId, data, dataByteCount, dataItemCount, deleter); // may throw
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::applyOnEachDef_abi
    void applyOnEachDef_abi(IDef* def, IApplyOnEachFunction* callback) noexcept override
    {
        m_pathCache.applyOnEach(def, *callback);
    }

    //! @copydoc omni::graph::exec::unstable::IExecutionContext::applyOnEachDefWithName_abi
    void applyOnEachDefWithName_abi(const ConstName* name, IApplyOnEachFunction* callback) noexcept override
    {
        m_pathCache.applyOnEach(*name, *callback);
    }

    //! Constructor
    //!
    //! The graph pointer is stored raw/non-owning; the graph must outlive this context —
    //! TODO confirm lifetime contract at call sites.
    ExecutionContext(IGraph* graph) noexcept
        : m_graph(graph), m_executionStamp(_getNextGlobalExecutionStamp()), m_pathCache(*graph)
    {
    }

    StorageType m_storage; //!< Data store.

private:
    //! Returns a process-wide, monotonically increasing execution stamp.
    static Stamp _getNextGlobalExecutionStamp() noexcept
    {
        // since this is private, and will only be accessed indirectly via virtual methods, declaring this inline static
        // should be ok
        static Stamp gExecutionStamp;
        gExecutionStamp.next();
        return gExecutionStamp;
    }

    IGraph* m_graph{ nullptr }; //!< Graph associated with this context.
    Stamp m_executionStamp; //!< Execution version incremented with each execution.
    SyncStamp m_initStamp; //!< State initialization version. Synchronized with graph topology.
    detail::ExecutionPathCache m_pathCache; //!< Cache of execution paths for a given definition. Populated lazily and
                                            //!< thread-safe.
    std::unordered_map<std::thread::id, size_t> m_contextThreadIds; //!< Unordered map of thread ids that kickstarted
                                                                    //!< context execution, along with a counter that
                                                                    //!< tracks the number of times that
                                                                    //!< nested/recursive execution has been triggered
                                                                    //!< by those context-starting threads.
    carb::thread::Spinlock m_threadIdSpinlock; //!< Mutex to protect m_contextThreadIds from concurrent write
                                               //!< operations.
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/NodePartition.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file NodePartition.h
//!
//! @brief Defines omni::graph::exec::unstable::NodePartition.
#pragma once
#include <omni/graph/exec/unstable/Span.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations
class INode;

//! Type definition used to pass node partitions in the ABI.
//!
//! Stored as a @ref Span of node pointers, i.e. a view over a contiguous array of nodes owned
//! elsewhere.
using NodePartition = omni::graph::exec::unstable::Span<INode* const>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/IGlobalPass.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGlobalPass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGlobalPass.
#pragma once
#include <omni/graph/exec/unstable/IPass.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilder;
class IGlobalPass;
class IGlobalPass_abi;
//! Base class for global passes.
//!
//! The purpose of a global pass is to perform global transformations on the graph.
//!
//! This transformation category should be considered as a last resort given its global impact on the topology which
//! prevents threading at the pass pipeline level.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IGlobalPass_abi : public omni::core::Inherits<IPass, OMNI_TYPE_ID("omni.graph.exec.unstable.IGlobalPass")>
{
protected:
    //! Call from pass pipeline to apply global graph transformations.
    //!
    //! @param builder Builder giving the pass mutation access to the graph under transformation.
    //!                NOTE(review): presumably non-null when invoked through the generated wrapper —
    //!                confirm against the null checks in IGlobalPass.gen.h.
    //!
    //! @return An @c omni::core::Result; the @c throw_result attribute makes the generated C++
    //!         wrapper convert a failure code into an exception.
    virtual OMNI_ATTR("throw_result") omni::core::Result run_abi(IGraphBuilder* builder) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IGlobalPass.
using GlobalPassPtr = omni::core::ObjectPtr<IGlobalPass>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGlobalPass.gen.h>
//! @copydoc omni::graph::exec::unstable::IGlobalPass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGlobalPass
: public omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGlobalPass.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/CompactUniqueIndex.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file CompactUniqueIndex.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::CompactUniqueIndex.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/Types.h>
#include <vector>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Registry of unique indexes with recycling of released indexes.
//!
//! Call @ref acquireUniqueIndex() to retrieve a unique index. Indexes are "compact", meaning abandoned indices will be
//! reused. This means that if @ref releaseUniqueIndex() is called with a value of 6, the next call to @ref
//! acquireUniqueIndex() will return 6.
//!
//! This class is useful for assigning a stable unique index to a set of dynamic items.
//!
//! Methods are not thread safe unless otherwise stated.
class CompactUniqueIndex
{
public:
    //! Invalid index is used when no free indexes are available, and as well
    //! as a value for reserved elements of the allocation array (an implementation detail)
    enum : std::size_t
    {
        kInvalidIndex = kInvalidNodeIndexInTopology
    };
    //! Constructor
    CompactUniqueIndex() noexcept = default;
    //! Destructor
    ~CompactUniqueIndex() noexcept = default;
    //! Returns a unique index.
    //!
    //! If @ref releaseUniqueIndex() was previously called, the value passed to it will be returned (i.e. the index will
    //! be recycled). Otherwise, a new index is allocated that is one greater than the current max index.
    //!
    //! May throw.
    inline std::size_t acquireUniqueIndex();
    //! Marks an index as no longer used.
    //!
    //! A subsequent call to @ref acquireUniqueIndex() will prefer reusing the index given to this method.
    //!
    //! If @p indexToFree was not previously returned by @ref acquireUniqueIndex, undefined behavior will result.
    inline void releaseUniqueIndex(std::size_t indexToFree);
    //! Returns the size of the registry.
    //!
    //! The maximum number of indices is returned, not the current number of "active" indices. Said differently, if
    //! @ref acquireUniqueIndex() is called followed by @ref releaseUniqueIndex(), @ref size() would return 1 not 0.
    std::size_t size() const
    {
        return m_allocatedIndexes.size();
    }

private:
    //! Index registry. Holds acquired and released indexes.
    //!
    //! The vector doubles as an intrusive singly-linked free list: a slot that is currently
    //! acquired stores @ref kInvalidIndex, while a released slot stores the index of the next
    //! free slot (or @ref kInvalidIndex when it is the end of the free list).
    std::vector<std::size_t> m_allocatedIndexes;
    //! All released indexes will form a list and m_lastFree points to the last released / first item of the list.
    //! Holds @ref kInvalidIndex when the free list is empty.
    std::size_t m_lastFree{ kInvalidIndex };
};
//! Acquire a unique index, preferring recycled slots from the free list over growing the registry.
inline std::size_t CompactUniqueIndex::acquireUniqueIndex()
{
    // Fast path: pop the head of the intrusive free list and reuse it.
    if (m_lastFree != kInvalidIndex)
    {
        OMNI_GRAPH_EXEC_ASSERT(m_lastFree < m_allocatedIndexes.size());
        const std::size_t reused = m_lastFree;
        m_lastFree = m_allocatedIndexes[reused]; // advance head to next free slot
        m_allocatedIndexes[reused] = kInvalidIndex; // mark slot as acquired
        return reused;
    }

    // Free list is empty: grow the registry by one slot and hand out its index.
    m_allocatedIndexes.emplace_back(kInvalidIndex);
    OMNI_GRAPH_EXEC_ASSERT(!m_allocatedIndexes.empty());
    return m_allocatedIndexes.size() - 1;
}
//! Return @p indexToFree to the free list so a later acquire can recycle it.
inline void CompactUniqueIndex::releaseUniqueIndex(std::size_t indexToFree)
{
    OMNI_GRAPH_EXEC_ASSERT(indexToFree < m_allocatedIndexes.size());
    OMNI_GRAPH_EXEC_ASSERT(m_allocatedIndexes[indexToFree] == kInvalidIndex);

    // Release-mode guard mirroring the asserts above: ignore indexes that are
    // out of range or whose slot is not currently marked as acquired.
    const bool isValidAcquiredSlot =
        indexToFree < m_allocatedIndexes.size() && m_allocatedIndexes[indexToFree] == kInvalidIndex;
    if (!isValidAcquiredSlot)
        return;

    // Push onto the intrusive free list. When the list is empty the slot keeps
    // kInvalidIndex, which acts as the list terminator.
    if (m_lastFree != kInvalidIndex)
        m_allocatedIndexes[indexToFree] = m_lastFree; // link to previous head
    m_lastFree = indexToFree;
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/PassRegistry.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file PassRegistry.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassRegistry.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGlobalPass.h>
#include <omni/graph/exec/unstable/IPartitionPass.h>
#include <omni/graph/exec/unstable/IPassFactory.h>
#include <omni/graph/exec/unstable/IPassRegistry.h>
#include <omni/graph/exec/unstable/IPopulatePass.h>
#include <omni/graph/exec/unstable/Types.h>
#include <memory>
#include <string>
#include <vector>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Scoped object that registers a pass factory in its constructor and deregisters in the objects destructor.
//!
//! Useful for temporarily registering @ref IPassFactory, for example, in a unit test.
//!
//! When registering a pass in a plugin, rather than using this object, prefer using one of the pass registration macros
//! (e.g. @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS()). See @ref groupOmniGraphExecPassRegistration for a list of
//! registration macros.
//! Scoped object that registers a pass factory in its constructor and deregisters in the objects destructor.
//!
//! Useful for temporarily registering @ref IPassFactory, for example, in a unit test.
//!
//! When registering a pass in a plugin, rather than using this object, prefer using one of the pass registration macros
//! (e.g. @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS()). See @ref groupOmniGraphExecPassRegistration for a list of
//! registration macros.
class ScopedPassRegistration
{
public:
    //! Constructor. Calls @ref IPassRegistry::registerPass().
    //!
    //! @param name Must not be @c nullptr.
    //!
    //! May throw.
    ScopedPassRegistration(PassType type,
                           const char* name,
                           omni::core::ObjectParam<IPassFactory> factory,
                           const ConstName& nameToMatch = ConstName(),
                           PassPriority priority = 0)
        : m_type(type), m_name(name)
    {
        OMNI_THROW_IF_ARG_NULL(name);
        _register(factory.get(), nameToMatch, priority);
    }
    //! Constructor. Calls @ref IPassRegistry::registerPass().
    //!
    //! The given function should have the signature `IPass*(IGraphBuilder*)`.
    //!
    //! May throw.
    template <typename Fn>
    ScopedPassRegistration(
        PassType type, const char* name, Fn&& fn, const ConstName& nameToMatch = ConstName(), PassPriority priority = 0)
        : m_type(type), m_name(name)
    {
        OMNI_THROW_IF_ARG_NULL(name);
        _register(createPassFactory(std::forward<Fn>(fn)).get(), nameToMatch, priority);
    }
    //! Constructor. Calls @ref IPassRegistry::registerPass().
    //!
    //! May throw.
    ScopedPassRegistration(PassType type,
                           std::string&& name,
                           omni::core::ObjectParam<IPassFactory> factory,
                           const ConstName& nameToMatch = ConstName(),
                           PassPriority priority = 0)
        : m_type(type), m_name(std::move(name))
    {
        _register(factory.get(), nameToMatch, priority);
    }
    //! Destructor. Calls @ref IPassRegistry::deregisterPass().
    ~ScopedPassRegistration() noexcept
    {
        if (m_registry)
        {
            m_registry->deregisterPass(m_type, m_name.c_str());
        }
    }

private:
    CARB_PREVENT_COPY_AND_MOVE(ScopedPassRegistration);

    //! Registers @p factory and caches the registry pointer used by the destructor to deregister.
    void _register(IPassFactory* factory, const ConstName& nameToMatch, PassPriority priority)
    {
        OMNI_THROW_IF_ARG_NULL(factory);
        m_registry = getPassRegistry();
        if (m_registry)
        {
            // Use the cached pointer rather than looking the registry up a second time; this also
            // guarantees registration and deregistration go through the same registry instance.
            m_registry->registerPass(m_type, m_name.c_str(), factory, nameToMatch, priority);
        }
    }

    //! Registry used for deregistration. Brace-initialized to nullptr so the destructor is safe
    //! even if a constructor throws before _register() runs.
    IPassRegistry* m_registry{ nullptr };
    PassType m_type; //!< Pass category the factory was registered under.
    std::string m_name; //!< Pass name; owned so deregistration can outlive the caller's string.
};
#ifndef DOXYGEN_BUILD
namespace detail
{
//! Deferred pass-registration record captured by the OMNI_GRAPH_EXEC_REGISTER_*_PASS() macros.
//!
//! Holds everything @ref registerModulePasses() needs to later construct a
//! @ref ScopedPassRegistration (which performs the actual registry call).
struct PassRegistrationInfo
{
    PassType type; //!< Pass category (e.g. populate/partitioning/global) to register under.
    std::string name; //!< Pass name (the stringized class name when produced by the macros).
    PassFactoryPtr factory; //!< Factory that instantiates the pass.
    ConstName nameToMatch; //!< Definition-name filter (empty default; see OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS()).
    PassPriority priority; //!< Priority used to resolve conflicts between competing passes.
    //! Constructor. Takes ownership of the factory and the name filter.
    PassRegistrationInfo(
        PassType type_, const char* name_, PassFactoryPtr&& factory_, ConstName&& nameToMatch_, PassPriority priority_)
        : type(type_), name(name_), factory(std::move(factory_)), nameToMatch(std::move(nameToMatch_)), priority(priority_)
    {
    }
};
//! Returns the module-local (e.g. per DLL) list of passes awaiting registration.
//!
//! Implementation detail; do not call directly. Instead, populate the list with one of:
//!
//! - @ref OMNI_GRAPH_EXEC_REGISTER_PASS()
//!
//! - @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS()
//!
//! The list is emptied once the module developer calls @ref registerModulePasses().
inline std::vector<PassRegistrationInfo>& getModulePassesToRegister()
{
    // Function-local static: one pending-registration list per module.
    static std::vector<PassRegistrationInfo> sPendingPassRegistrations;
    return sPendingPassRegistrations;
}
//! Returns the module-local (e.g. per DLL) list of live registrations awaiting teardown.
//!
//! Implementation detail; do not call directly.
//!
//! Filled in by @ref registerModulePasses(); cleared by @ref deregisterModulePasses().
inline std::vector<std::unique_ptr<ScopedPassRegistration>>& getModulePassesToDeregister()
{
    // Function-local static: one active-registration list per module.
    static std::vector<std::unique_ptr<ScopedPassRegistration>> sActivePassRegistrations;
    return sActivePassRegistrations;
}
} // namespace detail
# define OMNI_GRAPH_EXEC_CONCAT_(a_, b_) a_##b_
# define OMNI_GRAPH_EXEC_CONCAT(a_, b_) OMNI_GRAPH_EXEC_CONCAT_(a_, b_)
# define OMNI_GRAPH_EXEC_REGISTER_PASS_(type_, class_, var_, nameToMatch, priority_) \
static auto var_ = []() \
{ \
omni::graph::exec::unstable::detail::getModulePassesToRegister().emplace_back( \
type_, #class_, \
omni::graph::exec::unstable::createPassFactory([](omni::graph::exec::unstable::IGraphBuilder* b) \
{ return class_::create(b); }), \
omni::graph::exec::unstable::ConstName(nameToMatch), priority_); \
return 0; \
}()
#endif
//! @defgroup groupOmniGraphExecPassRegistration Pass Registration
//!
//! @brief Macros to register a plugin's passes.
//!
//! Pass registration macros should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call registration macros from a *.cpp*
//! file rather than a *.h* file.
//!
//! Registration macros only add the pass to a list of passes to register. This is useful if you have passes defined in
//! several **.cpp** files in your module. It is up to the module developer to call @ref registerModulePasses() and
//! @ref deregisterModulePasses() to perform the actual registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
//!
//! @ingroup groupOmniGraphExecPasses
//! Adds an @ref omni::graph::exec::unstable::IPass to a list to be registered at the module's (i.e.g DLL) startup.
//!
//! @param type_ A @ref omni::graph::exec::unstable::PassType.
//!
//! @param class_ An implementation of @ref omni::graph::exec::unstable::IPass.
//!
//! This macro should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file
//! rather than a *.h* file.
//!
//! This macro only adds the pass to a list of passes to register. This is useful if you have passes defined in several
//! **.cpp** files in your module. It is up to the module developer to call @ref
//! omni::graph::exec::unstable::registerModulePasses() and
//! @ref omni::graph::exec::unstable::deregisterModulePasses() to perform the actual registration.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
#define OMNI_GRAPH_EXEC_REGISTER_PASS(type_, class_) \
OMNI_GRAPH_EXEC_REGISTER_PASS_(type_, class_, OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", 0)
//! Adds an @ref omni::graph::exec::unstable::IPopulatePass to a list to be registered as type
//! @ref omni::graph::exec::unstable::PassType::ePopulate at the module's (i.e.g DLL) startup.
//!
//! @param class_ An implementation of @ref omni::graph::exec::unstable::IPopulatePass.
//!
//! @param defNameToPopulate_ The name of the definition, @ref omni::graph::exec::unstable::IDef, this pass should
//! populate. An example would be "OmniGraph".
//!
//! This macro should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file
//! rather than a *.h* file.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
#define OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(class_, defNameToPopulate_) \
static_assert(std::is_base_of<omni::graph::exec::unstable::IPopulatePass, class_>::value, \
"Registering a class that doesn't implement IPopulatePass"); \
OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::ePopulate, class_, \
OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), defNameToPopulate_, 0)
//! Adds an @ref omni::graph::exec::unstable::IPartitionPass to a list to be registered as type
//! @ref omni::graph::exec::unstable::PassType::ePartitioning at the module's (i.e.g DLL) startup.
//!
//! @param class_ An implementation of @ref omni::graph::exec::unstable::IPartitionPass.
//!
//! @param priority_ @ref omni::graph::exec::unstable::PassPriority used to resolve conflicts between passes
//!
//! This macro should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file
//! rather than a *.h* file.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
#define OMNI_GRAPH_EXEC_REGISTER_PARTITION_PASS(class_, priority_) \
static_assert(std::is_base_of<omni::graph::exec::unstable::IPartitionPass, class_>::value, \
"Registering a class that doesn't implement IPartitionPass"); \
OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::ePartitioning, class_, \
OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", priority_)
//! Adds an @ref omni::graph::exec::unstable::IGlobalPass to a list to be registered as type
//! @ref omni::graph::exec::unstable::PassType::eGlobal at the module's (i.e.g DLL) startup.
//!
//! @param class_ An implementation of @ref omni::graph::exec::unstable::IGlobalPass.
//!
//! This macro should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file
//! rather than a *.h* file.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
#define OMNI_GRAPH_EXEC_REGISTER_GLOBAL_PASS(class_) \
static_assert(std::is_base_of<omni::graph::exec::unstable::IGlobalPass, class_>::value, \
"Registering a class that doesn't implement IGlobalPass"); \
OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::eGlobal, class_, \
OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", 0)
//! Registers the module's @ref omni::graph::exec::unstable::IPass factories with @ref
//! omni::graph::exec::unstable::IPassRegistry.
//!
//! This function should be called in the module's function registered with omni::core::OMNI_MODULE_ON_MODULE_STARTED().
//! This is usually called @c onStarted().
//!
//! When developing a Kit extension, prefer calling @c OMNI_KIT_EXEC_CORE_ON_MODULE_STARTED() rather than this function.
//!
//! May throw.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
inline void registerModulePasses()
{
    auto& pending = detail::getModulePassesToRegister();
    auto& active = detail::getModulePassesToDeregister();
    // Turn each deferred registration record into a live ScopedPassRegistration;
    // the scoped object performs the actual IPassRegistry::registerPass() call
    // and will deregister when destroyed by deregisterModulePasses().
    for (auto& record : pending)
    {
        active.emplace_back(std::make_unique<ScopedPassRegistration>(
            record.type, std::move(record.name), std::move(record.factory), std::move(record.nameToMatch),
            record.priority));
    }
    pending.clear(); // records above were moved-from; drop them
}
//! Deregisters the module's @ref omni::graph::exec::unstable::IPass factories with @ref IPassRegistry.
//!
//! Failure to call this function may lead to crashes during program shutdown.
//!
//! This function should be called in the module's function registered with omni::core::OMNI_MODULE_ON_MODULE_UNLOAD().
//! This is usually called @c onUnload().
//!
//! When developing a Kit extension, prefer calling @c OMNI_KIT_EXEC_CORE_ON_MODULE_UNLOAD() rather than this function.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
inline void deregisterModulePasses() noexcept
{
    // Destroying the ScopedPassRegistration objects performs the actual deregistration.
    auto& active = detail::getModulePassesToDeregister();
    active.clear();
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/AtomicBackoff.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file AtomicBackoff.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::AtomicBackoff.
#pragma once
#include <carb/Defines.h>
#if CARB_X86_64
# include <immintrin.h>
#endif
#include <thread>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Exponential backoff pattern for waiting with a cap number of pauses
//!
//! This class implements exponential backoff, where each call to pause will
//! cause busy waiting and increment the number of iterations for next pause call.
//! All of this is cap with a maximum limit of pause calls after which waiting
//! is considered long and switches to yield.
//!
//! This class is useful when we expect short waits and would like to prevent
//! yielding the compute resources for this short period of time.
//!
//! Methods are not thread safe unless otherwise noted.
//! Exponential backoff for short waits, capped before falling back to yielding.
//!
//! Each pause() call busy-spins for the current iteration count and then doubles
//! it. Once the count exceeds the cap, the wait is considered long and every
//! subsequent pause() yields the thread instead, until reset() is called.
//!
//! Useful when waits are expected to be short and giving up the core would cost
//! more than briefly spinning.
//!
//! Methods are not thread safe unless otherwise noted.
class AtomicBackoff
{
public:
    //! Default constructor
    constexpr AtomicBackoff() noexcept
    {
    }
    // A backoff counter is inherently local to one waiter; copying it is never useful.
    AtomicBackoff(const AtomicBackoff&) = delete;
    AtomicBackoff& operator=(const AtomicBackoff&) = delete;

    //! Pause execution for a short period of time.
    //!
    //! Busy-spins with an exponentially growing iteration count, switching to
    //! std::this_thread::yield() once the count passes the cap.
    void pause() noexcept
    {
        if (m_spinCount > kSpinLimit)
        {
            // Past the cap: the wait is long, so give up the core instead of burning it.
            std::this_thread::yield();
            return;
        }

        for (int i = 0; i < m_spinCount; ++i)
        {
#if CARB_X86_64
            _mm_pause();
#elif CARB_AARCH64
            // based on TBB machine_pause and BOOST pause.hpp
            __asm__ __volatile__("yield" ::: "memory");
#endif
        }
        m_spinCount *= 2; // exponential growth for the next pause() call
    }

    //! Clear wait counter
    void reset() noexcept
    {
        m_spinCount = 1;
    }

private:
    //! Upper cap for busy waiting; once exceeded, pause() always yields until reset().
    //!
    //! The number must be power of two and is approximately equal to number of pause instructions it would take
    //! to do a context switch.
    enum : int
    {
        kSpinLimit = 16
    };
    //! Busy-loop iteration count for the next pause(); doubles each call, capped by kSpinLimit.
    int m_spinCount{ 1 };
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionStateInfo.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! State associated with a given execution task
//!
//! @note We separated execution state from the execution graph to allow concurrent and/or nested execution
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>
: public omni::graph::exec::unstable::IExecutionStateInfo_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutionStateInfo")
//! Store a "future" result for this state. The actual computation is running asynchronously outside of execution
//! frame
//!
//! @return \c true if execution state accepts "future" results.
bool storeBackgroundResult(omni::core::ObjectParam<omni::graph::exec::unstable::IBackgroundResult> result);
//! Query used by some executors to determine if computation of a node is necessary
bool needsCompute(const omni::graph::exec::unstable::Stamp& execVersion) noexcept;
//! Set to request computation
void requestCompute() noexcept;
//! Reset request to compute after computation was performed
void setComputed() noexcept;
//! Get current/last exec version set for this node during execution
omni::graph::exec::unstable::SyncStamp getExecutionStamp() noexcept;
//! Set current exec version for this node. Returns true if version wasn't in sync.
bool setExecutionStamp(const omni::graph::exec::unstable::Stamp& execVersion) noexcept;
//! Returns a value from a node's key/value datastore.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is returned in @p outTypeId.
//!
//! @p outPtr will be updated with a pointer to the actual data.
//!
//! @p outItemSize store the size of each item in the returned array.
//!
//! @p outItemCount contains the number of items returned (i.e. the number
//! of items @p outPtr points to). For an array, this will be greater than
//! 1.
//!
//! If the key is not found, @p outPtr is set to @c nullptr and @p
//! outItemCount is set to 0.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
void getNodeData(omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId* outTypeId,
void** outPtr,
uint64_t* outItemSize,
uint64_t* outItemCount);
//! Sets a value in a node's key/value datastore.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is set with @p typeId.
//!
//! @p data points to an array of data items.
//!
//! @p itemSize is the size of each item in the given array.
//!
//! @p itemCount contains the number of items pointed to by @p data. For an
//! array, this will be greater than 1.
//!
//! @p deleter is a function used to delete @p data when either a new value
//! is set at the key or the context is invalidated. If @p deleter is @c
//! nullptr, it is up to the calling code to manage the lifetime of the @p
//! data.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
void setNodeData(omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId typeId,
void* data,
uint64_t itemSize,
uint64_t itemCount,
omni::graph::exec::unstable::NodeDataDeleterFn* deleter);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::storeBackgroundResult(
omni::core::ObjectParam<omni::graph::exec::unstable::IBackgroundResult> result)
{
OMNI_THROW_IF_ARG_NULL(result);
auto return_ = storeBackgroundResult_abi(result.get());
return return_;
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::needsCompute(
const omni::graph::exec::unstable::Stamp& execVersion) noexcept
{
return needsCompute_abi(execVersion);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::requestCompute() noexcept
{
requestCompute_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setComputed() noexcept
{
setComputed_abi();
}
inline omni::graph::exec::unstable::SyncStamp omni::core::Generated<
omni::graph::exec::unstable::IExecutionStateInfo_abi>::getExecutionStamp() noexcept
{
return getExecutionStamp_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setExecutionStamp(
const omni::graph::exec::unstable::Stamp& execVersion) noexcept
{
return setExecutionStamp_abi(execVersion);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::getNodeData(
omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId* outTypeId,
void** outPtr,
uint64_t* outItemSize,
uint64_t* outItemCount)
{
OMNI_THROW_IF_ARG_NULL(outTypeId);
OMNI_THROW_IF_ARG_NULL(outPtr);
OMNI_THROW_IF_ARG_NULL(outItemSize);
OMNI_THROW_IF_ARG_NULL(outItemCount);
OMNI_THROW_IF_FAILED(getNodeData_abi(key, outTypeId, outPtr, outItemSize, outItemCount));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setNodeData(
omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId typeId,
void* data,
uint64_t itemSize,
uint64_t itemCount,
omni::graph::exec::unstable::NodeDataDeleterFn* deleter)
{
OMNI_THROW_IF_ARG_NULL(data);
OMNI_THROW_IF_FAILED(setNodeData_abi(key, typeId, data, itemSize, itemCount, deleter));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/ConstName.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ConstName.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ConstName.
#pragma once
#include <carb/Defines.h>
#include <carb/cpp/StringView.h>
#include <omni/String.h>
#include <omni/graph/exec/unstable/Types.h>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! An immutable name with fast hash based comparisons.
//!
//! @ref ConstName is a hash of a given string. This hash is used for all comparisons. The original string is also
//! stored in this object.
//!
//! Prefer using the overloaded comparison operators (e.g. ==, <, !=) rather than performing comparison operators with
//! the result of @ref toString() or @ref getString(). Using the comparison operators is considerably faster.
//!
//! Comparison of @ref ConstName with `const char*`, @c omni::string, or @c std::string is potentially slow and as such
//! no comparison operators exist to do so. To compare a @ref ConstName with either a `const char*`, @c omni::string,
//! or @c std::string, you must first explicitly create a @ref ConstName from the string.
//!
//! Classes like `carb::RString` and `pxr::TfToken` also utilize a hash of an original string for fast string
//! comparison. In these classes, the hash is simply passed around but the string is stored in a global lookup table.
//! When the original string is needed, the hash is used as a lookup in the global table.
//!
//! Unlike `carb::RString` and `pxr::TfToken`, @ref ConstName avoids the global table. Rather, the original string is
//! stored alongside the hash. The benefit of avoiding the global table is speed. Performance testing revealed that
//! when constructing names of objects during graph traversal, the cost of multiple threads reading and writing to the
//! global tables storing `carb::RString` and `pxr::TfToken`'s strings was a bottleneck.
//!
//! If you need speed in threaded code, use @ref ConstName. If you want to save space, use `carb::RString` or
//! `pxr::TfToken`.
//!
//! The object is ABI-safe. When returning a @ref ConstName or passing a @ref ConstName to/from an ABI method, prefer
//! using a const pointer rather than passing by value.
class ConstName
{
public:
    //! Construct from a static compile time string.
    //!
    //! The array reference form binds to string literals without a strlen() at runtime.
    template <std::size_t N>
    explicit ConstName(const char (&s)[N]) : m_hash(carb::fnv1aHash(s, N - 1)), m_name(s, N - 1)
    {
        // N-1 so that we don't hash the terminating \0.
    }
    //! Construct from a @ref carb::cpp::string_view. This constructor also accepts `const char *`.
    explicit ConstName(const carb::cpp::string_view& sv) : m_hash(carb::fnv1aHash(sv.data(), sv.size())), m_name(sv)
    {
    }
    //! Construct from a @ref carb::cpp::string_view with an already computed hash.
    //!
    //! No verification is performed that @p hash matches the hash of @p sv; the caller is trusted.
    explicit ConstName(const carb::cpp::string_view& sv, NameHash hash) : m_hash(hash), m_name(sv)
    {
    }
    //! Construct from empty string.
    ConstName() noexcept : m_hash(CARB_HASH_STRING(""))
    {
    }
    //! Implementation detail. Ignore.
    //!
    //! Dummy tag type used purely to keep the enable_if constructors below distinct overloads.
    struct BogusArg
    {
    };
    //! Construct from a @c std::string.
    template <typename T>
    explicit ConstName(const T& s, std::enable_if_t<std::is_same<std::string, T>::value, BogusArg> = {})
        : m_hash(carb::fnv1aHash(s.data(), s.size())), m_name(s)
    {
        // the enable_if disambiguates which constructor a const char* arg should use.
        // the BogusArg type keeps this constructor from being confused with the one with a hash.
    }
    //! Construct from a @c std::string and a pre-computed hash.
    template <typename T>
    explicit ConstName(const T& s, NameHash hash, std::enable_if_t<std::is_same<std::string, T>::value, BogusArg> = {})
        : m_hash(hash), m_name(s)
    {
        // the enable_if disambiguates which constructor a const char* arg should use.
    }
    //! Returns the string used to generate the hash. For debugging purposes only.
    const omni::string& getString() const noexcept
    {
        return m_name;
    }
    //! Returns the hash used for comparison.
    //!
    //! Prefer using the overloaded comparison operators (e.g. <, ==) rather than directly calling this method.
    constexpr uint64_t getHash() const noexcept
    {
        return m_hash;
    }
    //! Converts to a @c std::string. For debugging purposes only.
    //!
    //! Prefer using @ref getString() over this method, as @ref getString() does not copy any data.
    std::string toString() const
    {
        return std::string(m_name.data(), m_name.size());
    }
    //! Returns the name as a null-terminated `const char*`.
    const char* c_str() const noexcept
    {
        return m_name.c_str();
    }

private:
    // Hash first, then the owned string; the class docs above state the object is ABI-safe,
    // so this layout should not be changed casually.
    uint64_t m_hash;
    omni::string m_name;
};
//! Equality comparison of two @ref ConstName objects.
//!
//! @returns @c true when both precomputed hashes match.
constexpr bool operator==(const ConstName& lhs, const ConstName& rhs) noexcept
{
    return lhs.getHash() == rhs.getHash();
}

//! Equality comparison of a @ref ConstName against a raw hash.
//!
//! @returns @c true when the hashes match.
constexpr bool operator==(const ConstName& lhs, NameHash rhs) noexcept
{
    return lhs.getHash() == rhs;
}

//! Equality comparison of a raw hash against a @ref ConstName.
//!
//! @returns @c true when the hashes match.
constexpr bool operator==(NameHash lhs, const ConstName& rhs) noexcept
{
    return lhs == rhs.getHash();
}
//! Inequality comparison of two @ref ConstName objects.
//!
//! @returns @c true when the precomputed hashes differ.
constexpr bool operator!=(const ConstName& lhs, const ConstName& rhs) noexcept
{
    return lhs.getHash() != rhs.getHash();
}

//! Inequality comparison of a @ref ConstName against a raw hash.
//!
//! @returns @c true when the hashes differ.
constexpr bool operator!=(const ConstName& lhs, NameHash rhs) noexcept
{
    return lhs.getHash() != rhs;
}

//! Inequality comparison of a raw hash against a @ref ConstName.
//!
//! @returns @c true when the hashes differ.
constexpr bool operator!=(NameHash lhs, const ConstName& rhs) noexcept
{
    return lhs != rhs.getHash();
}
//! Strict-weak ordering of two @ref ConstName objects by hash value.
//!
//! Note this orders by hash, not lexicographically by string content.
//!
//! @returns @c true when the first hash is less than the second's.
constexpr bool operator<(const ConstName& lhs, const ConstName& rhs) noexcept
{
    return lhs.getHash() < rhs.getHash();
}

//! Orders a @ref ConstName against a raw hash by hash value.
//!
//! @returns @c true when the first hash is less than the second's.
constexpr bool operator<(const ConstName& lhs, NameHash rhs) noexcept
{
    return lhs.getHash() < rhs;
}

//! Orders a raw hash against a @ref ConstName by hash value.
//!
//! @returns @c true when the first hash is less than the second's.
constexpr bool operator<(NameHash lhs, const ConstName& rhs) noexcept
{
    return lhs < rhs.getHash();
}
//! Output stream operator overload. Writes the contents of @p str to @p os.
//!
//! @param os Stream to write to.
//! @param str The name to write.
//!
//! @return @p os.
//!
//! @throws std::ios_base::failure if an exception is thrown during output.
inline std::ostream& operator<<(std::ostream& os, const ConstName& str)
{
    os << str.getString();
    return os;
}
//! Concatenates a @c std::string with a @ref ConstName. Returns an @c omni::string.
inline auto operator+(const std::string& lhs, const ConstName& rhs)
{
    return lhs + rhs.getString();
}

//! Concatenates a @ref ConstName with a @c std::string. Returns an @c omni::string.
inline auto operator+(const ConstName& lhs, const std::string& rhs)
{
    return lhs.getString() + rhs;
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
namespace std
{

//! std::hash specialization for omni::graph::exec::unstable::ConstName.
//!
//! Simply forwards the hash already stored in the name — no hashing work is
//! done here.
template <>
struct hash<omni::graph::exec::unstable::ConstName>
{
    //! Argument type alias.
    using argument_type = omni::graph::exec::unstable::ConstName;

    //! Result type alias.
    using result_type = std::size_t;

    //! Hash operator; returns the name's stored hash.
    result_type operator()(const argument_type& x) const noexcept
    {
        return static_cast<result_type>(x.getHash());
    }
};

} // namespace std
|
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderContext.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Common state for graph builders.
//!
//! *TODO* We will use this class to introduce reporting from graph transformation pipeline back to the application.
//!
//! Generated omni.bind wrapper: each method below forwards to the matching
//! `_abi` virtual declared on the interface.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>
: public omni::graph::exec::unstable::IGraphBuilderContext_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilderContext")
//! Current construction version.
//!
//! Incremented after each pass pipeline run to include definitions that were created before the run.
omni::graph::exec::unstable::Stamp getConstructionStamp() noexcept;
//! Return owner of all graphs this builder touches
//!
//! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IGraph* getGraph() noexcept;
//! To be overridden by application specific class to enable reporting from transformation pipeline.
void report(const char* diagnose) noexcept;
//! Run transformation pipeline
void runTransformations(omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// Inline trampolines (generated by omni.bind): each simply forwards to the
// corresponding _abi virtual, unwrapping ObjectParam arguments where needed.
inline omni::graph::exec::unstable::Stamp omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilderContext_abi>::getConstructionStamp() noexcept
{
return getConstructionStamp_abi();
}
inline omni::graph::exec::unstable::IGraph* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilderContext_abi>::getGraph() noexcept
{
return getGraph_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>::report(const char* diagnose) noexcept
{
report_abi(diagnose);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>::runTransformations(
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept
{
runTransformations_abi(nodeGraphDef.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/IInvalidationForwarder.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IInvalidationForwarder.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IInvalidationForwarder.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IInvalidationForwarder_abi;
class IInvalidationForwarder;
class ITopology;
//! Interface wrapping a function (possibly with storage) to forward topology invalidation notices.
class IInvalidationForwarder_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IInvalidationForwarder")>
{
protected:
//! Invokes the wrapped function.
//!
//! The given topology must not be @c nullptr.
//!
//! NOTE(review): per the "throw_if_null" annotation, the generated public
//! wrapper presumably throws on a null @p topology before reaching this
//! noexcept ABI method — verify against the generated .gen.h.
virtual void invoke_abi(OMNI_ATTR("not_null, throw_if_null") ITopology* topology) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IInvalidationForwarder.
using InvalidationForwarderPtr = omni::core::ObjectPtr<IInvalidationForwarder>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IInvalidationForwarder.gen.h>
//! @copydoc omni::graph::exec::unstable::IInvalidationForwarder_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IInvalidationForwarder
: public omni::core::Generated<omni::graph::exec::unstable::IInvalidationForwarder_abi>
{
// Intentionally empty: the generated base supplies the public API; this class
// only provides the stable, user-facing name.
};
#include <omni/graph/exec/unstable/ITopology.h>
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IInvalidationForwarder.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/IPassRegistry.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Registry (database) of known @ref omni::graph::exec::unstable::IPass objects.
//!
//! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly accessing
//! methods on this interface.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
//!
//! Generated omni.bind wrapper: methods forward to the matching `_abi`
//! virtuals, adding null-argument checks where annotated.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>
: public omni::graph::exec::unstable::IPassRegistry_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassRegistry")
//! Registers a new pass.
//!
//! @p type is the type of pass being registered (e.g. populate, partition, etc).
//!
//! @p name is the name of the pass. This name is used to deregister the pass (see @ref
//! omni::graph::exec::unstable::IPassRegistry::deregisterPass) so the name should be unique within this registry.
//! Must not be `nullptr`.
//!
//! @p factory is the interface that will instantiate the pass. Must not be `nullptr`.
//!
//! Some pass types (e.g. populate passes) desire to affect only a subset of the nodes and/or definitions in a
//! graph. @p nameToMatch is used to specify the name of the node/definitions the pass wishes to affect. The meaning
//! of this field is pass type dependent. Many pass types ignore @p nameToMatch. Must not be `nullptr`. This method
//! copies the name.
//!
//! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When
//! multiple passes wish to affect an entity, @p priority can be used to resolve the conflict. The meaning of the
//! priority value is pass type specific. Many pass types ignore @p priority.
//!
//! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly
//! accessing this method.
bool registerPass(omni::graph::exec::unstable::PassType passType,
const char* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IPassFactory> factory,
const omni::graph::exec::unstable::ConstName& nameToMatch,
omni::graph::exec::unstable::PassPriority priority);
//! Deregisters a pass.
//!
//! Returns @c true if the pass was found and removed. Returns @c false if the pass could not be found.
//!
//! If multiple passes were registered with the same name, this method will only remove one of them.
//!
//! When deregistering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly
//! accessing this method.
bool deregisterPass(omni::graph::exec::unstable::PassType passType, const char* name);
//! Returns a sub-registry containing pass of the specified type.
//!
//! The returned @ref omni::graph::exec::unstable::IPassTypeRegistry will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IPassTypeRegistry* getPassTypeRegistry(omni::graph::exec::unstable::PassType type) noexcept;
//! Returns version stamp for the registry.
//!
//! Version is incremented each time the content of registry changes, i.e. pass is added or removed.
omni::graph::exec::unstable::Stamp getStamp() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// Inline trampolines (generated by omni.bind). Pointer arguments annotated
// not_null/throw_if_null are checked here before crossing the ABI boundary.
inline bool omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::registerPass(
omni::graph::exec::unstable::PassType passType,
const char* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IPassFactory> factory,
const omni::graph::exec::unstable::ConstName& nameToMatch,
omni::graph::exec::unstable::PassPriority priority)
{
OMNI_THROW_IF_ARG_NULL(name);
OMNI_THROW_IF_ARG_NULL(factory);
auto return_ = registerPass_abi(passType, name, factory.get(), &nameToMatch, priority);
return return_;
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::deregisterPass(
omni::graph::exec::unstable::PassType passType, const char* name)
{
OMNI_THROW_IF_ARG_NULL(name);
auto return_ = deregisterPass_abi(passType, name);
return return_;
}
inline omni::graph::exec::unstable::IPassTypeRegistry* omni::core::Generated<
omni::graph::exec::unstable::IPassRegistry_abi>::getPassTypeRegistry(omni::graph::exec::unstable::PassType type) noexcept
{
return getPassTypeRegistry_abi(type);
}
inline omni::graph::exec::unstable::Stamp omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::getStamp() noexcept
{
return getStamp_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/IDef.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IDef.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IDef.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IDef;
class IDef_abi;
class ExecutionTask;
//! Base class for all node definitions
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Since definitions can be shared by multiple nodes, and nodes can be executed in parallel, implementations of
//! this interface should expect its methods to be called in parallel.
class IDef_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IDef")>
{
protected:
//! Execute the node definition.
//!
//! See thread safety information in interface description.
virtual Status execute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;
//! Provide runtime information about scheduling constraints a particular task has.
//!
//! The provided @ref omni::graph::exec::unstable::ExecutionTask can be used to determine the path of the current
//! definition.
//!
//! The given task must not be @c nullptr.
//!
//! See thread safety information in interface description.
virtual SchedulingInfo getSchedulingInfo_abi(OMNI_ATTR("in, not_null, throw_if_null, ref")
const ExecutionTask* info) noexcept = 0;
//! Return unique definition identifier.
//!
//! See thread safety information in interface description.
virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IDef.
using DefPtr = omni::core::ObjectPtr<IDef>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IDef.gen.h>
//! @copydoc omni::graph::exec::unstable::IDef_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IDef : public omni::core::Generated<omni::graph::exec::unstable::IDef_abi>
{
// Intentionally empty: the generated base supplies the public API; this class
// only provides the stable, user-facing name.
};
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IDef.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/IPassRegistry.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPassRegistry.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassRegistry.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IPassFactory;
class IPassRegistry;
class IPassRegistry_abi;
class IPassTypeRegistry;
//! Registry (database) of known @ref omni::graph::exec::unstable::IPass objects.
//!
//! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly accessing
//! methods on this interface.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPassRegistry_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPassRegistry")>
{
protected:
//! Registers a new pass.
//!
//! @p type is the type of pass being registered (e.g. populate, partition, etc).
//!
//! @p name is the name of the pass. This name is used to deregister the pass (see @ref
//! omni::graph::exec::unstable::IPassRegistry::deregisterPass) so the name should be unique within this registry.
//! Must not be `nullptr`.
//!
//! @p factory is the interface that will instantiate the pass. Must not be `nullptr`.
//!
//! Some pass types (e.g. populate passes) desire to affect only a subset of the nodes and/or definitions in a
//! graph. @p nameToMatch is used to specify the name of the node/definitions the pass wishes to affect. The meaning
//! of this field is pass type dependent. Many pass types ignore @p nameToMatch. Must not be `nullptr`. This method
//! copies the name.
//!
//! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When
//! multiple passes wish to affect an entity, @p priority can be used to resolve the conflict. The meaning of the
//! priority value is pass type specific. Many pass types ignore @p priority.
//!
//! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly
//! accessing this method.
virtual bool registerPass_abi(PassType passType,
OMNI_ATTR("c_str, not_null, throw_if_null") const char* name,
OMNI_ATTR("not_null, throw_if_null") IPassFactory* factory,
OMNI_ATTR("in, not_null, throw_if_null, ref") const ConstName* nameToMatch,
PassPriority priority) noexcept = 0;
//! Deregisters a pass.
//!
//! Returns @c true if the pass was found and removed. Returns @c false if the pass could not be found.
//!
//! If multiple passes were registered with the same name, this method will only remove one of them.
//!
//! When deregistering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly
//! accessing this method.
virtual bool deregisterPass_abi(PassType passType,
OMNI_ATTR("c_str, not_null, throw_if_null") const char* name) noexcept = 0;
//! Returns a sub-registry containing pass of the specified type.
//!
//! The returned @ref omni::graph::exec::unstable::IPassTypeRegistry will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("no_acquire") IPassTypeRegistry* getPassTypeRegistry_abi(PassType type) noexcept = 0;
//! Returns version stamp for the registry.
//!
//! Version is incremented each time the content of registry changes, i.e. pass is added or removed.
virtual Stamp getStamp_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IPassRegistry.
using PassRegistryPtr = omni::core::ObjectPtr<IPassRegistry>;
//! Returns the singleton pass registry.
inline IPassRegistry* getPassRegistry() noexcept;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPassRegistry.gen.h>
//! @copydoc omni::graph::exec::unstable::IPassRegistry_abi
//!
//! @ingroup groupOmniGraphExecPassRegistration groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPassRegistry
: public omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>
{
// Intentionally empty: the generated base supplies the public API; this class
// only provides the stable, user-facing name.
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IPassFactory.h>
#include <omni/graph/exec/unstable/IPassTypeRegistry.h>
//! Returns a singleton containing the pass registry.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! Thread-safe: C++11 guarantees the function-local static below is initialized
//! exactly once even under concurrent first calls.
//!
//! @ingroup groupOmniGraphExecPassRegistration
inline omni::graph::exec::unstable::IPassRegistry* omni::graph::exec::unstable::getPassRegistry() noexcept
{
// createType() always calls acquire() and returns an ObjectPtr to make sure release() is called. we don't want to
// hold a ref here to avoid static destruction issues. here we allow the returned ObjectPtr to destruct (after
// calling get()) to release our ref. we know the DLL in which the singleton was created is maintaining a ref and
// will keep the singleton alive for the lifetime of the DLL.
static auto sSingleton = omni::core::createType<IPassRegistry>().get();
return sSingleton;
}
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPassRegistry.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/Stamp.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Stamp.h
//!
//! @brief Defines omni::graph::exec::unstable::Stamp class.
#pragma once
#include <limits>
#include <string>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Low-level ABI type backing @ref Stamp.
using Stamp_abi = int16_t;

//! Lazy, light-weight change notification system.
//!
//! The heart of EF's invalidation system is @ref Stamp and @ref SyncStamp.
//!
//! A @ref Stamp tracks the state/version of a resource as an integer generation
//! counter. Each time the state of the resource changes, @ref Stamp::next() is
//! called to denote the new state. @ref Stamp objects are owned by the resource
//! they track.
//!
//! The consumer side is implemented by the @ref SyncStamp class, which
//! tracks/synchronizes to the state of a @ref Stamp. @ref SyncStamp objects are
//! owned by the entities that wish to utilize the mutating resource.
//!
//! For example, consider the following example showing how a consumer of a
//! resource can use stamps to detect when a resource has changed and update
//! cached data:
//!
#ifdef OMNI_GRAPH_EXEC_DOC_BUILD
//! @snippet "../tests.cpp/TestStamp.cpp" ef-docs-stamp-example
#endif
//!
//! A default constructed @ref Stamp and @ref SyncStamp are never in sync,
//! meaning reconstruction will always happen at least once.
//!
//! Stamps are a lazy, light-weight alternative to heavier change notification
//! systems such as callbacks.
//!
//! The generation counter wraps when it reaches its numeric maximum; the wrap
//! skips @ref Stamp::kInvalidStamp. Because of the limited bit depth of
//! @ref Stamp, it is possible, though improbable, that a resource at stamp *X*
//! wraps all the way back to *X* before a @ref SyncStamp tries to synchronize
//! with the stamp. In such a case, the @ref SyncStamp will erroneously believe
//! it is in sync with the resource. Again, unlikely, though possible.
//!
//! EF makes extensive use of stamps to detect changes in pass registration,
//! graph topology, and graph construction. See @rstref{Graph Invalidation
//! <ef_graph_invalidation>} to understand how @ref Stamp is used for
//! invalidation in EF.
//!
//! This object is ABI-safe.
class Stamp
{
public:
    //! Anonymous enum defining kInvalidStamp.
    enum : Stamp_abi
    {
        kInvalidStamp = -1 //!< Value for an invalid stamp.
    };

    //! Advance to the next generation, wrapping to 0 (never kInvalidStamp) at
    //! the numeric limit.
    void next() noexcept
    {
        // Depending on usage we may have to make this atomic; for now we do not
        // expect that to be needed. The explicit wrap avoids signed overflow
        // (which would be UB) and skips kInvalidStamp.
        if (m_generation == std::numeric_limits<Stamp_abi>::max())
            m_generation = 0;
        else
            ++m_generation;
        static_assert(offsetof(Stamp, m_generation) == 0, "unexpected offset");
    }

    //! Make the stamp invalid.
    void invalidate() noexcept
    {
        m_generation = kInvalidStamp;
    }

    //! Check whether the stamp holds a valid generation.
    bool isValid() const noexcept
    {
        return m_generation != Stamp::kInvalidStamp;
    }

    //! Equal operator.
    bool operator==(const Stamp& rhs) const noexcept
    {
        return m_generation == rhs.m_generation;
    }

    //! Not equal operator.
    bool operator!=(const Stamp& rhs) const noexcept
    {
        return m_generation != rhs.m_generation;
    }

    //! Convert to string for debugging. Invalid stamps render as "INV".
    std::string toString() const // may throw
    {
        return isValid() ? std::to_string(m_generation) : std::string("INV");
    }

private:
    Stamp_abi m_generation{ kInvalidStamp }; //!< Stamp storage

    friend class SyncStamp;
};

static_assert(std::is_standard_layout<Stamp>::value, "Stamp is expected to be abi safe");
static_assert(2 == sizeof(Stamp), "Stamp is an unexpected size");
//! Watcher of a mutating resource. Observes a resource's @ref Stamp and detects
//! whether it has changed.
//!
//! Always used in pair with the @ref Stamp class to detect changes in a
//! resource. See @ref Stamp's docs for an explanation of how this object is
//! used during invalidation.
class SyncStamp
{
public:
    //! Sentinel meaning "never synchronized". Distinct from Stamp::kInvalidStamp
    //! so that a default-constructed SyncStamp is never in sync with any Stamp.
    enum
    {
        kInvalidStamp = Stamp::kInvalidStamp - 1
    };

    //! Constructor; starts out of sync with everything.
    SyncStamp() noexcept = default;

    //! Construct already in sync with the given stamp.
    SyncStamp(Stamp id) noexcept : m_syncStamp(id.m_generation)
    {
        static_assert(offsetof(SyncStamp, m_syncStamp) == 0, "unexpected offset");
    }

    //! Check whether this instance is in sync with the given stamp. Always
    //! returns false when this instance holds the invalid sentinel.
    bool inSync(const Stamp& id) const noexcept
    {
        return isValid() && m_syncStamp == id.m_generation;
    }

    //! Check whether two sync stamps are in sync with each other.
    bool inSync(const SyncStamp& syncStamp) const noexcept
    {
        return m_syncStamp == syncStamp.m_syncStamp;
    }

    //! Synchronize this instance with the given stamp.
    void sync(const Stamp& id) noexcept
    {
        m_syncStamp = id.m_generation;
    }

    //! Synchronize this instance with the given sync stamp.
    void sync(const SyncStamp& syncStamp) noexcept
    {
        m_syncStamp = syncStamp.m_syncStamp;
    }

    //! In one call, test and synchronize the stamp. After this call this
    //! instance is guaranteed to be in sync with the given id.
    //!
    //! @return False if the stamps were already in sync, true otherwise.
    bool makeSync(const Stamp& id) noexcept
    {
        const bool wasOutOfSync = !inSync(id);
        if (wasOutOfSync)
            sync(id);
        return wasOutOfSync;
    }

    //! Is this sync stamp valid (i.e. has it ever been synchronized)?
    bool isValid() const noexcept
    {
        return m_syncStamp != SyncStamp::kInvalidStamp;
    }

    //! Reset to the never-synchronized sentinel.
    void invalidate() noexcept
    {
        m_syncStamp = SyncStamp::kInvalidStamp;
    }

    //! Explicit conversion to the Stamp class. An invalid SyncStamp converts to
    //! an invalid Stamp.
    Stamp toStamp() const noexcept
    {
        Stamp result;
        if (isValid())
            result.m_generation = m_syncStamp;
        return result;
    }

    //! Convert to string for debugging. Invalid stamps render as "INV".
    std::string toString() const // may throw
    {
        return isValid() ? std::to_string(m_syncStamp) : std::string("INV");
    }

private:
    Stamp_abi m_syncStamp{ kInvalidStamp }; //!< Stamp storage
};

static_assert(std::is_standard_layout<SyncStamp>::value, "SyncStamp is expected to be abi safe");
static_assert(2 == sizeof(SyncStamp), "SyncStamp is an unexpected size");
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/ScheduleFunction.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ScheduleFunction.h
//!
//! @brief Helpers for @ref omni::graph::exec::unstable::IScheduleFunction.
#pragma once
#include <omni/graph/exec/unstable/IScheduleFunction.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
namespace detail
{
#ifndef DOXYGEN_BUILD
// Primary template: Fn is an arbitrary invocable (lambda, function pointer,
// std::function). Invocation is a direct call; capture moves the callable out.
template <typename Fn>
struct ScheduleFunctionHelper
{
static omni::graph::exec::unstable::Status invoke(Fn&& fn)
{
return fn();
}
static auto capture(Fn&& fn)
{
return std::move(fn);
}
};
// Specialization for a raw IScheduleFunction* (the deduced template argument is
// an lvalue reference to the pointer). Capture borrows, i.e. calls acquire() so
// the object stays alive while a scheduler holds the captured value.
template <>
struct ScheduleFunctionHelper<IScheduleFunction*&>
{
static omni::graph::exec::unstable::Status invoke(IScheduleFunction* fn)
{
return fn->invoke();
}
static auto capture(IScheduleFunction* fn)
{
return omni::core::borrow(fn);
}
};
// Specialization for an ObjectPtr<IScheduleFunction> lvalue; capture transfers
// ownership out of the caller's smart pointer via move.
// NOTE(review): rvalue/const ObjectPtr arguments fall through to the primary
// template, whose invoke() would call fn() directly — presumably callers always
// pass lvalues; verify if that assumption changes.
template <>
struct ScheduleFunctionHelper<omni::core::ObjectPtr<IScheduleFunction>&>
{
static omni::graph::exec::unstable::Status invoke(omni::core::ObjectPtr<IScheduleFunction>& fn)
{
return fn->invoke();
}
static omni::core::ObjectPtr<IScheduleFunction> capture(omni::core::ObjectPtr<IScheduleFunction>& fn)
{
return std::move(fn);
}
};
#endif
} // namespace detail
//! Helper function to efficiently call an invocable object (i.e. std::function, function ptr, IScheduleFunction*).
//!
//! Dispatches through detail::ScheduleFunctionHelper so that IScheduleFunction
//! pointers/ObjectPtrs are invoked via their invoke() method while plain
//! callables are called directly.
template <typename Fn>
omni::graph::exec::unstable::Status invokeScheduleFunction(Fn&& fn)
{
return detail::ScheduleFunctionHelper<Fn>::invoke(std::forward<Fn>(fn));
}
//! Helper function to efficiently capture an invocable object (i.e. std::function, function ptr, IScheduleFunction*).
//!
//! Suitable when capturing the invocable object in a lambda to be passed to a scheduler.
//!
//! Use this function when an @ref IScheduleFunction will be invoked at a later time by a scheduler. This function will
//! call @ref omni::core::IObject::acquire() on the @ref IScheduleFunction.
//!
//! If an invocable object that is not a @ref IScheduleFunction is passed to this function, @c std::move() will be
//! called.
//!
//! The trailing return type makes the result depend on the helper's chosen
//! capture form (moved callable vs. ObjectPtr).
template <typename Fn>
auto captureScheduleFunction(Fn&& fn) -> decltype(detail::ScheduleFunctionHelper<Fn>::capture(std::forward<Fn>(fn)))
{
return detail::ScheduleFunctionHelper<Fn>::capture(std::forward<Fn>(fn));
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDef.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file INodeGraphDef.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeGraphDef.
#pragma once
#include <omni/graph/exec/unstable/IDef.h>
#include <omni/graph/exec/unstable/INodeFactory.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class INode;
class INodeGraphDef_abi;
class INodeGraphDef;
class ITopology;
//! Graph definition. Defines work to be done as a graph.
//!
//! Nodes within a graph represent work to be done. The actual work to be performed is described in a
//! @rstref{definition <ef_definition>}. Each node wanting to perform work points to a definition.
//!
//! This interface is a subclass of the work definition interface (i.e. @ref omni::graph::exec::unstable::IDef) and
//! extends @ref omni::graph::exec::unstable::IDef with methods to describe work as a graph.
//!
//! Visually:
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//! :align: center
//!
//! @endrst
//!
//! Above, you can see the two types of definitions: opaque definitions (described by @ref
//! omni::graph::exec::unstable::INodeDef) and graph definitions (described by this interface).
//!
//! Nodes within a graph definition can point to other graph definitions. This composability is where EF gets its *graph
//! of graphs* moniker.
//!
//! Multiple nodes in the execution graph can point to the same instance of a graph definition. This saves both space
//! and graph construction time. However, since each graph definition can be shared, its pointer value cannot be used
//! to uniquely identify its location in the graph. To solve this, when traversing/executing a graph definition, an
//! @ref omni::graph::exec::unstable::ExecutionPath is passed (usually via @ref
//! omni::graph::exec::unstable::ExecutionTask::getUpstreamPath()).
//!
//! When defining new graph types, it is common to create a new implementation of this interface. See @ref
//! omni::graph::exec::unstable::NodeGraphDef for an implementation of this interface that can be easily inherited from.
//! See @rstref{Definition Creation <ef_definition_creation>} for a guide on creating your own graph definition.
//!
//! How a graph definition's nodes are traversed during execution is defined by the definition's @ref
//! omni::graph::exec::unstable::IExecutor. See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth
//! guide on how executors and graph definitions work together during execution.
//!
//! See also @ref omni::graph::exec::unstable::INode, @ref omni::graph::exec::unstable::IExecutor, and @ref
//! omni::graph::exec::unstable::ExecutionTask.
class INodeGraphDef_abi : public omni::core::Inherits<IDef, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeGraphDef")>
{
protected:
    //! Return this graph's topology object.
    //!
    //! Each @ref omni::graph::exec::unstable::INodeGraphDef owns a @ref omni::graph::exec::unstable::ITopology.
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;
    //! Initialize the state of the graph.
    //!
    //! It is up to the implementation of the graph type to decide whether this call needs to be propagated over all
    //! nodes within the graph or a single shared state is owned by the graph.
    //!
    //! @param rootTask State will be initialized for every instance of this graph. Root task will provide a path to
    //! allow discovery of the state. Must not be @c nullptr.
    virtual OMNI_ATTR("throw_result") omni::core::Result
    initializeState_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* rootTask) noexcept = 0;
    //! Pre-execution call can be used to setup the graph state prior to execution or skip entirely the execution.
    //!
    //! The given task must not be @c nullptr.
    virtual Status preExecute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;
    //! Post-execution call can be used to finalize the execution, e.g. transfer computation results to consumers.
    //!
    //! The given task must not be @c nullptr.
    virtual Status postExecute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;
    //! Acquire factory object allowing for allocating new node instances for this node graph def.
    //!
    //! Provided factory may be empty when graph def doesn't allow allocating new nodes outside of pass that constructed
    //! the definition in the first place.
    //!
    //! Accessing node factory is thread-safe but mutating graphs topology is not. This includes node creation.
    //!
    //! NOTE(review): unlike getTopology_abi(), this method carries no "no_acquire" annotation, so the
    //! returned factory presumably has acquire() called on it — confirm against INodeFactory documentation.
    virtual INodeFactory* getNodeFactory_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref INodeGraphDef.
using NodeGraphDefPtr = omni::core::ObjectPtr<INodeGraphDef>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeGraphDef.gen.h>
//! @copydoc omni::graph::exec::unstable::INodeGraphDef_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeGraphDef
    : public omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>
{
public:
    //! Access graph's root node.
    //!
    //! The returned @ref INode will *not* have @ref omni::core::IObject::acquire() called before being returned.
    inline INode* getRoot() noexcept;
};
#include <omni/graph/exec/unstable/ITopology.h>
//! Fetches the root node by delegating to the topology owned by this graph definition.
inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::INodeGraphDef::getRoot() noexcept
{
    auto* topology = getTopology();
    return topology->getRoot();
}
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeGraphDef.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/NodeDef.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file NodeDef.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::INodeDef.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/INodeDef.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::INodeDef
template <typename... Bases>
class NodeDefT : public Implements<Bases...>
{
public:
    //! Factory method. Creates an opaque node definition with the given name.
    //!
    //! @param definitionName Definition name is considered as a token that transformation passes can register against
    //!
    //! *TODO* Replace runtime hashing of node definition name to id with a compile time hash generation.
    //!
    //! May throw.
    static omni::core::ObjectPtr<NodeDefT> create(const char* definitionName)
    {
        OMNI_THROW_IF_ARG_NULL(definitionName);
        return omni::core::steal(new NodeDefT{ definitionName });
    }

protected:
    //! Core implementation of @ref omni::graph::exec::unstable::IDef::execute_abi for @ref NodeDef
    //!
    //! Opaque definitions carry no executable payload; reports success without performing any work.
    Status execute_abi(ExecutionTask* info) noexcept override
    {
        return Status::eSuccess;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IDef::getSchedulingInfo_abi for @ref NodeDef
    //!
    //! Always requests serial scheduling.
    SchedulingInfo getSchedulingInfo_abi(const ExecutionTask* info) noexcept override
    {
        return SchedulingInfo::eSerial;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IDef::getName_abi for @ref NodeDef
    const ConstName* getName_abi() noexcept override
    {
        return &m_definitionName;
    }

    //! Constructor. Captures the definition's name.
    NodeDefT(const char* definitionName) : m_definitionName{ definitionName }
    {
    }

private:
    ConstName m_definitionName; //!< Name associated with this opaque definition.
};
//! Core NodeDef implementation for @ref omni::graph::exec::unstable::INodeDef
using NodeDef = NodeDefT<INodeDef>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionPath.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ExecutionPath.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ExecutionPath.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/SmallStack.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Types.h>
#include <atomic>
#include <initializer_list>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class INode;
//! Path representing a unique location of an instantiated node during execution.
//!
//! The @ref omni::graph::exec::unstable::ExecutionPath class is an efficient utility class used to store the *execution
//! path* of an @ref omni::graph::exec::unstable::INode. There's subtlety to what an execution path is. That subtlety is
//! best explained with a diagram:
//!
//! @rst
//!
//! .. image:: /../docs/ef-execution-path-point-k.svg
//! :align: center
//!
//! @endrst
//!
//! Above, nodes are labelled with lower-case letters (e.g. *a*, *b*, etc.). Node can point to either an
//! @ref omni::graph::exec::unstable::INodeDef (which defines opaque computation) or an @ref
//! omni::graph::exec::unstable::INodeGraphDef (which defines its computation with a subgraph). In the diagram above,
//! @ref omni::graph::exec::unstable::INodeGraphDef objects are labelled with upper-case letters (e.g. *X*, *Y*).
//!
//! Observe that @ref omni::graph::exec::unstable::INodeGraphDef *X* is used by both nodes *e* and *f*. This illustrates
//! that @ref omni::graph::exec::unstable::INodeGraphDef objects can be reused within the graph. This makes sense
//! because @ref omni::graph::exec::unstable::INodeGraphDef is defining computational logic and that logic may be needed
//! in multiple places in the graph (e.g. instancing). Likewise, though not illustrated above, @ref
//! omni::graph::exec::unstable::INodeDef objects can be reused.
//!
//! Consider node *k* above (pointed to by the yellow arrow). When *k* is executing, what is its execution path? One
//! way to describe the path is to store the nodes traversed to get to the node. For instance, */a/c/m/n/h/i/k* could be
//! a likely path. Lets call this representation of a path the *traversal path*.
//!
//! EF (via @ref omni::graph::exec::unstable::ExecutionPath) does not store *traversal paths*. Rather, it uses a much
//! more compact representation called the *execution path*. In the diagram above, the execution path for *k* is
//! */f/p/k*.
//!
//! @ref omni::graph::exec::unstable::ExecutionPath stores enough information to **uniquely identify the node**. That's
//! important, since *k* is being shared in the diagram above. The execution path */e/k* points to the same *k* node's
//! memory but logically */e/k* and */f/p/k* are different nodes. This illustrates the main purpose of this object:
//! @ref omni::graph::exec::unstable::INode, @ref omni::graph::exec::unstable::INodeDef, *and* @ref
//! omni::graph::exec::unstable::INodeGraphDef *objects can not be uniquely identified by their raw pointer value.*
//! @ref omni::graph::exec::unstable::ExecutionPath *must be used to uniquely identify a node.*
//!
//! @ref omni::graph::exec::unstable::ExecutionPath is often used as a key in a key/value store to access a node's state
//! data.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Unless otherwise noted, methods in this class are not thread-safe.
//!
//! To reduce the amount of new paths we create, we only create a new path when entering a node graph definition
//! execution. All tasks generated for computing nodes within the same node graph will point to the same path.
class ExecutionPath
{
    //! Sentinel value meaning both "empty path" and "cached hash not yet computed".
    enum : ExecutionPathHash
    {
        kEmptyPathHash = 0
    };
public:
    //! Default constructor for an empty path. Consider using getEmpty() if you need one.
    ExecutionPath() noexcept
    {
    }
    //! Copy constructor
    ExecutionPath(const ExecutionPath& src) : m_path{ src.m_path }, m_cachedHash(src.m_cachedHash.load())
    {
    }
    //! Assignment operator
    ExecutionPath& operator=(const ExecutionPath& rhs)
    {
        m_path = rhs.m_path;
        m_cachedHash.store(rhs.m_cachedHash.load());
        return *this;
    }
    //! Construct a path for a node (used only at the beginning of the execution).
    explicit ExecutionPath(omni::core::ObjectParam<INode> node) noexcept : m_path{ node.get() }
    {
        OMNI_GRAPH_EXEC_ASSERT(node.get());
    }
    //! Construct a path from an upstream path and a node. Mostly used when entering a node graph definition.
    //!
    //! May throw.
    ExecutionPath(const ExecutionPath& upPath, omni::core::ObjectParam<INode> node)
        : m_path{ upPath.m_path, node.get() }
    {
        OMNI_GRAPH_EXEC_ASSERT(node.get());
    }
    //! Convenience method for constructing paths from initializer list.
    //!
    //! Relies on ObjectParam<INode> being pointer-sized (enforced by the static_assert below) so the
    //! initializer list storage can be reinterpreted as a contiguous INode* array.
    //!
    //! May throw.
    explicit ExecutionPath(std::initializer_list<omni::core::ObjectParam<INode>> path)
        : m_path{ const_cast<INode**>(reinterpret_cast<INode* const*>(path.begin())),
                  const_cast<INode**>(reinterpret_cast<const INode* const*>(path.end())) }
    {
        static_assert(sizeof(INode*) == sizeof(omni::core::ObjectParam<INode>), "unexpected ObjectParam size");
    }
private:
    //! Internal range constructor used by copyWithoutTop().
    ExecutionPath(INode** begin, INode** end) : m_path{ begin, end }
    {
    }
public:
    //! Append a node to the path.
    //!
    //! The given node is not internally acquired and it is up to the calling code to ensure the node remains alive
    //! while in use by this object.
    //!
    //! May throw.
    void push(INode* node)
    {
        OMNI_GRAPH_EXEC_ASSERT(node);
        m_path.push(node);
        m_cachedHash = kEmptyPathHash; // invalidate cached hash; recomputed lazily by getHash()
    }
    //! Return a new path with a last node removed
    //!
    //! May throw.
    ExecutionPath copyWithoutTop() const
    {
        int delta = (m_path.empty() ? 0 : -1); // guard: an empty path stays empty
        return ExecutionPath{ const_cast<INode**>(m_path.begin()), const_cast<INode**>(m_path.end() + delta) };
    }
    //! Compute unique index using pairing function and unique indexes of nodes (within owning topology)
    //!
    //! This is one strategy to generate a hash for a path. Other hashing strategies can be built outside of the class
    //! and used for example when retrieving state from execution context.
    //!
    //! The result is cached and method is thread-safe.
    inline ExecutionPathHash getHash() const noexcept;
    //! Compute unique index using pairing function and unique indexes of nodes (within owning topology)
    //!
    //! @param node Include given node as the last node in the path. This allows us to avoid creating a new path
    //!             when fetching a state for an execution task.
    //!
    //! This method is thread-safe.
    inline ExecutionPathHash getHashWith(omni::core::ObjectParam<INode> node) const noexcept;
    //! Check if path is empty.
    bool isEmpty() const noexcept
    {
        return m_path.empty();
    }
    //! Access to underlying path container
    Span<INode* const> getData() const noexcept
    {
        return Span<INode* const>{ m_path.begin(), m_path.size() };
    }
    //! Return the node at the top of the stack.
    //!
    //! Undefined behavior if the stack is empty.
    INode* getTop() const noexcept
    {
        return m_path.top();
    }
    //! An instance of an empty path.
    //!
    //! @warning A different empty path may be returned over multiple calls of this method. Do not rely on using a
    //!          pointer to the returned object to check if another path is the empty path. Rather, use the
    //!          @ref isEmpty() method to check if a path is empty.
    static const ExecutionPath& getEmpty() noexcept
    {
        static ExecutionPath sPath; // unique per DLL
        return sPath;
    }
    //! Pairing function used by the hashing algorithm
    //!
    //! Cantor pairing: (a + b)(a + b + 1)/2 + b, with the division expressed as a 0.5 double multiply.
    //! NOTE(review): the double arithmetic may lose precision once (a + b) grows beyond 2^53 —
    //! confirm the expected range of node indexes / hash values for very deep paths.
    static ExecutionPathHash pairingFunction(ExecutionPathHash a, ExecutionPathHash b)
    {
        return static_cast<ExecutionPathHash>(((a + b) * (a + b + 1) * 0.5) + b);
    }
private:
    // Container for nodes forming the path
    //
    // We use a small stack to reduce the need of heap allocations.
    using PathStorage = detail::SmallStack<INode*>;
    PathStorage m_path; //!< Path is defined by a series of nodes that we visit during task generation
    //! Cache used to accelerate getHash method. We decided to go with mutable version, since we want to preserve
    //! the const correctness in places that receive the path, i.e. we don't want anyone to attempt mutating the
    //! path from these places. The alternative would be to not provide caching which means we give up performance
    //! and that is not acceptable.
    mutable std::atomic<ExecutionPathHash> m_cachedHash{ kEmptyPathHash };
};
static_assert(std::is_standard_layout<ExecutionPath>::value, "ExecutionPath is expected to be abi safe");
static_assert(72 == sizeof(ExecutionPath), "ExecutionPath is an unexpected size");
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// includes needed for method implementations
#include <omni/graph/exec/unstable/INode.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Lazily computes (and caches) the hash of this path.
//!
//! Every thread that races to fill the cache computes the same value, so a plain atomic store
//! is sufficient — no compare-and-exchange is needed.
inline ExecutionPathHash ExecutionPath::getHash() const noexcept
{
    if (m_path.empty())
    {
        return kEmptyPathHash;
    }

    ExecutionPathHash cached = m_cachedHash;
    if (cached != kEmptyPathHash)
    {
        return cached;
    }

    ExecutionPathHash hash = kEmptyPathHash;
    for (INode* pathNode : m_path)
    {
        hash = pairingFunction(hash, pathNode->getIndexInTopology());
    }
    m_cachedHash = hash;
    return hash;
}
//! Computes the hash this path would have if @p node were appended, without constructing a new path.
inline ExecutionPathHash ExecutionPath::getHashWith(omni::core::ObjectParam<INode> node) const noexcept
{
    OMNI_GRAPH_EXEC_ASSERT(node.get());
    return pairingFunction(getHash(), node->getIndexInTopology());
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderNode.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Describes a node @ref omni::graph::exec::unstable::IGraphBuilder can manipulate.
//!
//! Only @ref omni::graph::exec::unstable::IGraphBuilder should use @ref omni::graph::exec::unstable::IGraphBuilderNode.
//! One way to think about this interface is that it is a private interface used by
//! @ref omni::graph::exec::unstable::IGraphBuilder to connect instances of @ref omni::graph::exec::unstable::INode.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>
    : public omni::graph::exec::unstable::IGraphBuilderNode_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilderNode")
    // NOTE: the methods below are generated wrappers around the raw *_abi methods; the throwing
    // variants convert a failing omni::core::Result into a C++ exception (see the inline
    // implementations further down in this file).
    //! Adds the given node as a parent (i.e. upstream) of this node.
    //!
    //! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the
    //! node persists while in use by this interface.
    //!
    //! @p parent must not be @c nullptr.
    //!
    //! It is undefined behavior to add a parent multiple times to a node.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    void _addParent(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent);
    //! Removes the given node as a parent.
    //!
    //! If given node is not a parent, this method returns success.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    void _removeParent(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent);
    //! Adds the given node as a child (i.e. downstream) of this node.
    //!
    //! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the
    //! node persists while in use by this interface.
    //!
    //! @p child must not be @c nullptr.
    //!
    //! It is undefined behavior to add a child multiple times to a node.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    void _addChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child);
    //! Removes the given node as a child.
    //!
    //! If given node is not a child, this method returns success.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    void _removeChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child);
    //! Remove from the container parent nodes that no longer exist in current topology, i.e are invalid.
    //!
    //! @ref omni::core::IObject::release() is not called on the invalid nodes.
    //!
    //! This method is not thread safe.
    void _removeInvalidParents() noexcept;
    //! Remove from the container child nodes that no longer exist in current topology, i.e are invalid.
    //!
    //! @ref omni::core::IObject::release() is not called on the invalid nodes.
    //!
    //! This method is not thread safe.
    void _removeInvalidChildren() noexcept;
    //! Invalidate all children and parents connections by invalidating the topology this node is sync with.
    //!
    //! This method is thread safe.
    void _invalidateConnections() noexcept;
    //! Sets the number of parents who are a part of cycle.
    //!
    //! This method is not thread safe.
    void setCycleParentCount(uint32_t count) noexcept;
    //! Sets the definition for this node.
    //!
    //! If a definition is already set, it will be replaced by the given definition.
    //!
    //! The given definition may be @c nullptr.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given pointer.
    //!
    //! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeGraphDef().
    //!
    //! This method is not thread safe.
    void _setNodeDef(omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept;
    //! Sets the definition for this node.
    //!
    //! If a definition is already set, it will be replaced by the given definition.
    //!
    //! The given definition may be @c nullptr.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given pointer.
    //!
    //! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeDef().
    //!
    //! This method is not thread safe.
    void _setNodeGraphDef(omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept;
    //! Unsets this node's definition.
    //!
    //! If the definition is already @c nullptr, this method does nothing.
    //!
    //! This method is not thread safe.
    void _clearDef() noexcept;
    //! Access the topology owning this node.
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    //!
    //! This method is not thread safe.
    omni::graph::exec::unstable::ITopology* getTopology() noexcept;
    //! Make topology valid for current topology version. Drop all the connections if topology changed.
    //!
    //! This method is not thread safe.
    void validateOrResetTopology() noexcept;
    //! Access parent at the given index.
    //!
    //! If the given index is greater than the parent count, an error is returned.
    //!
    //! This method is not thread safe.
    //!
    //! May throw due to internal casting.
    //!
    //! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentCount().
    //!
    //! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getParents()
    //! for a modern C++ wrapper to this method.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    omni::graph::exec::unstable::IGraphBuilderNode* getParentAt(uint64_t index);
    //! Returns the number of parents.
    //!
    //! This method is not thread safe.
    uint64_t getParentCount() noexcept;
    //! Access child at the given index.
    //!
    //! If the given index is greater than the child count, an error is returned.
    //!
    //! This method is not thread safe.
    //!
    //! May throw due to internal casting.
    //!
    //! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildCount().
    //!
    //! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildren()
    //! for a modern C++ wrapper to this method.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    omni::graph::exec::unstable::IGraphBuilderNode* getChildAt(uint64_t index);
    //! Returns the number of children.
    //!
    //! This method is not thread safe.
    uint64_t getChildCount() noexcept;
    //! Returns @c true if the given node is an immediate child of this node.
    //!
    //! @p node may be @c nullptr.
    //!
    //! This method is not thread safe.
    bool hasChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> node) noexcept;
    //! Returns @c true if this node is the root of the topology.
    //!
    //! This method is not thread safe.
    bool isRoot() noexcept;
    //! Returns the root node of the topology of which this node is a part.
    //!
    //! This method is not thread safe.
    //!
    //! May throw due to internal casting.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    omni::core::Result getRoot(omni::graph::exec::unstable::IGraphBuilderNode** out);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// Generated thunks: each validates its argument, forwards to the raw ABI method, and converts a
// failing omni::core::Result into an exception via OMNI_THROW_IF_FAILED.
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_addParent(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent)
{
    OMNI_THROW_IF_ARG_NULL(parent);
    OMNI_THROW_IF_FAILED(_addParent_abi(parent.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeParent(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent)
{
    OMNI_THROW_IF_ARG_NULL(parent);
    OMNI_THROW_IF_FAILED(_removeParent_abi(parent.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_addChild(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child)
{
    OMNI_THROW_IF_ARG_NULL(child);
    OMNI_THROW_IF_FAILED(_addChild_abi(child.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeChild(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child)
{
    OMNI_THROW_IF_ARG_NULL(child);
    OMNI_THROW_IF_FAILED(_removeChild_abi(child.get()));
}
// Generated thunks for noexcept ABI methods: forward directly, no error translation needed.
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeInvalidParents() noexcept
{
    _removeInvalidParents_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeInvalidChildren() noexcept
{
    _removeInvalidChildren_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_invalidateConnections() noexcept
{
    _invalidateConnections_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::setCycleParentCount(uint32_t count) noexcept
{
    setCycleParentCount_abi(count);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_setNodeDef(
    omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept
{
    _setNodeDef_abi(nodeDef.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_setNodeGraphDef(
    omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept
{
    _setNodeGraphDef_abi(nodeGraphDef.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_clearDef() noexcept
{
    _clearDef_abi();
}
// Generated accessor thunks. Per the interface documentation in this file, raw pointers returned
// here are non-owning (acquire() is not called); throwing variants convert a failing Result into
// an exception via OMNI_THROW_IF_FAILED.
inline omni::graph::exec::unstable::ITopology* omni::core::Generated<
    omni::graph::exec::unstable::IGraphBuilderNode_abi>::getTopology() noexcept
{
    return getTopology_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::validateOrResetTopology() noexcept
{
    validateOrResetTopology_abi();
}
inline omni::graph::exec::unstable::IGraphBuilderNode* omni::core::Generated<
    omni::graph::exec::unstable::IGraphBuilderNode_abi>::getParentAt(uint64_t index)
{
    omni::graph::exec::unstable::IGraphBuilderNode* out;
    OMNI_THROW_IF_FAILED(getParentAt_abi(index, &out));
    return out;
}
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getParentCount() noexcept
{
    return getParentCount_abi();
}
inline omni::graph::exec::unstable::IGraphBuilderNode* omni::core::Generated<
    omni::graph::exec::unstable::IGraphBuilderNode_abi>::getChildAt(uint64_t index)
{
    omni::graph::exec::unstable::IGraphBuilderNode* out;
    OMNI_THROW_IF_FAILED(getChildAt_abi(index, &out));
    return out;
}
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getChildCount() noexcept
{
    return getChildCount_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::hasChild(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> node) noexcept
{
    return hasChild_abi(node.get());
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::isRoot() noexcept
{
    return isRoot_abi();
}
inline omni::core::Result omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getRoot(
    omni::graph::exec::unstable::IGraphBuilderNode** out)
{
    OMNI_THROW_IF_ARG_NULL(out);
    auto return_ = getRoot_abi(out);
    return return_;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/IGlobalPass.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for global passes.
//!
//! The purpose of a global pass is to perform global transformations on the graph.
//!
//! This transformation category should be considered as a last resort given its global impact on the topology which
//! prevents threading at the pass pipeline level.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi>
    : public omni::graph::exec::unstable::IGlobalPass_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGlobalPass")
    //! Call from pass pipeline to apply global graph transformations.
    //!
    //! Throws if the underlying ABI call returns a failing @ref omni::core::Result.
    void run(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// Generated thunk: forwards to run_abi() and converts a failing Result into an exception.
// NOTE(review): unlike sibling generated wrappers, @p builder is not null-checked here before
// the ABI call — presumably the ABI's annotations omit the null check; confirm upstream.
inline void omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi>::run(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder)
{
    OMNI_THROW_IF_FAILED(run_abi(builder.get()));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/IPopulatePass.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for populate passes.
//!
//! Register a populate pass with @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(). When registering a pass, a "name to
//! match" is also specified. This name is the name of a node or definition on which the registered pass should
//! populate.
//!
//! Populate passes are typically the first pass type to run in the pass pipeline. When a node is encountered during
//! construction, only a single populate pass will get a chance to populate the newly discovered node. If no pass is
//! registered against the node's name, the node definition's name is used to find a population pass to run.
//!
//! Populate pass is allowed to attach a new definition to a node it runs on.
//!
//! Minimal rebuild of the execution graph topology should be considered by the pass each time it runs. Pass pipeline
//! leaves the responsibility of deciding if pass needs to run to the implementation. At minimum it can rely on
//! verifying that topology of @ref omni::graph::exec::unstable::NodeGraphDef it generated before is still valid or
//! @ref omni::graph::exec::unstable::NodeDef has not changed.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPopulatePass_abi>
    : public omni::graph::exec::unstable::IPopulatePass_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPopulatePass")

    //! Call from pass pipeline to apply graph transformations on a given node (definition or topology).
    //!
    //! Exception-throwing C++ wrapper over @ref IPopulatePass_abi::run_abi (see the
    //! inline definition below); generated by <i>omni.bind</i>.
    void run(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder,
             omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
//! Thin inline wrapper over the raw ABI call: converts the returned
//! omni::core::Result into a C++ exception via OMNI_THROW_IF_FAILED.
//!
//! NOTE(review): no OMNI_THROW_IF_ARG_NULL checks are emitted for @p builder or
//! @p node (the IPartitionPass bindings do emit them) -- presumably intentional
//! per the ABI's omni.bind attributes; confirm if null arguments are possible.
inline void omni::core::Generated<omni::graph::exec::unstable::IPopulatePass_abi>::run(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder,
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
    OMNI_THROW_IF_FAILED(run_abi(builder.get(), node.get()));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderContext.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGraphBuilderContext.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilderContext.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraph;
class IGraphBuilder;
class IGraphBuilderContext;
class IGraphBuilderContext_abi;
class INodeGraphDef;
//! Common state for graph builders.
//!
//! *TODO* We will use this class to introduce reporting from graph transformation pipeline back to the application.
//! Common state for graph builders.
//!
//! Pure-virtual ABI; concrete implementations live behind the generated
//! exception-throwing wrapper (see IGraphBuilderContext.gen.h).
class IGraphBuilderContext_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
                                  OMNI_TYPE_ID("omni.graph.exec.unstable.IGraphBuilderContext")>
{
protected:
    //! Current construction version.
    //!
    //! Incremented after each pass pipeline run to include definitions that were created before the run.
    virtual Stamp getConstructionStamp_abi() noexcept = 0;

    //! Return owner of all graphs this builder touches.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    virtual OMNI_ATTR("no_acquire") IGraph* getGraph_abi() noexcept = 0;

    //! To be overridden by application specific class to enable reporting from transformation pipeline.
    virtual void report_abi(OMNI_ATTR("in, c_str, not_null") const char* diagnose) noexcept = 0;

    //! Run transformation pipeline for the given node graph definition.
    virtual void runTransformations_abi(OMNI_ATTR("not_null") INodeGraphDef* nodeGraphDef) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IGraphBuilderContext.
using GraphBuilderContextPtr = omni::core::ObjectPtr<IGraphBuilderContext>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraphBuilderContext.gen.h>
//! @copydoc omni::graph::exec::unstable::IGraphBuilderContext_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraphBuilderContext
    : public omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>
{
    // Intentionally empty: all functionality comes from the generated wrapper,
    // which exposes exception-throwing versions of the *_abi methods declared
    // in IGraphBuilderContext_abi.
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraphBuilderContext.gen.h>
|
omniverse-code/kit/include/omni/graph/exec/unstable/IPartitionPass.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for graph partitioning passes.
//!
//! Partition passes are typically run just after population passes and only on newly modified
//! @ref omni::graph::exec::unstable::INodeGraphDef objects. The job of a partition pass is to recognize patterns in the
//! newly populated graph and replace them with a new definition or augment existing one.
//!
//! Partition passes can only mutate the graph from the @ref omni::graph::exec::unstable::IPartitionPass::commit method
//! using provided @ref omni::graph::exec::unstable::IGraphBuilder. This will guarantee that the rest of the pipeline
//! is aware of changes made to the graph and avoid potential threading issues.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
//! Generated exception-throwing wrapper over @ref IPartitionPass_abi
//! (produced by <i>omni.bind</i>); inline definitions follow below.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>
    : public omni::graph::exec::unstable::IPartitionPass_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPartitionPass")

    //! Call from pass pipeline to initialize the pass for @p topology.
    //!
    //! This interface method implementation can't mutate given @p topology. Multiple passes can run concurrently on it.
    //!
    //! Returns True if initialization was successful and pipeline should issue calls to run and commit.
    //! Otherwise this pass will be destroyed and won't participate in partitioning @p topology.
    bool initialize(omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology);

    //! Call from pass pipeline to discover nodes requiring partitioning.
    //!
    //! No topology changes are permitted at this point. Multiple passes will get a chance to receive this
    //! notification.
    //!
    //! Call to this method comes from graph traversal that may run multiple passes concurrently.
    void run(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);

    //! Call to verify generated partitions and commit new definition/s replacing discovered partitions.
    //!
    //! Commit of partitions is done serially and in the priority order of the pass. Passes with higher order will get
    //! the chance first. This is the only partition pass method that can mutate the graph.
    void commit(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
//! Throws if @p topology is null; otherwise forwards to initialize_abi and
//! returns its boolean result unchanged.
inline bool omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::initialize(
    omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology)
{
    OMNI_THROW_IF_ARG_NULL(topology);
    auto return_ = initialize_abi(topology.get());
    return return_;
}
//! Throws if @p node is null; otherwise forwards to run_abi (which is noexcept,
//! so no result conversion is needed).
inline void omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::run(
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
    OMNI_THROW_IF_ARG_NULL(node);
    run_abi(node.get());
}
//! Throws if @p builder is null; otherwise forwards to commit_abi. This is the
//! only partition-pass entry point allowed to mutate the graph (see class docs).
inline void omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::commit(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder)
{
    OMNI_THROW_IF_ARG_NULL(builder);
    commit_abi(builder.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/ui/IOmniGraphUi.h | // Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/core/IObject.h>
namespace omni
{
namespace graph
{
namespace ui
{
OMNI_DECLARE_INTERFACE(IOmniGraphUi);
//! Marker interface for the omni.graph.ui plugin.
class IOmniGraphUi_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.ui.IOmniGraphUi")>
{
    // No functionality but exists so that we can provide C++ nodes
};
} // namespace ui
} // namespace graph
} // namespace omni
#include "IOmniGraphUi.gen.h" // generated file
|
omniverse-code/kit/include/omni/graph/ui/PyIOmniGraphUI.gen.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
//! Registers the IOmniGraphUi interface and its generated parent wrapper with
//! pybind11 under module @p m, then returns the bound class object.
auto bindIOmniGraphUi(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856

    // Bind the generated intermediate wrapper first so the concrete class below
    // can list it as a base.
    py::class_<omni::core::Generated<omni::graph::ui::IOmniGraphUi_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::ui::IOmniGraphUi_abi>>,
               omni::core::IObject>
        parentClass(m, "_IOmniGraphUi");

    py::class_<omni::graph::ui::IOmniGraphUi, omni::core::Generated<omni::graph::ui::IOmniGraphUi_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::ui::IOmniGraphUi>, omni::core::IObject>
        boundClass(m, "IOmniGraphUi");

    // Constructor from an existing IObject: down-casts, failing loudly on a
    // type mismatch.
    boundClass.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto converted = omni::core::cast<omni::graph::ui::IOmniGraphUi>(obj.get());
            if (!converted)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return converted;
        }));

    // Default constructor: instantiates the interface via the type factory.
    boundClass.def(py::init(
        []()
        {
            auto created = omni::core::createType<omni::graph::ui::IOmniGraphUi>();
            if (!created)
            {
                throw std::runtime_error("unable to create omni::graph::ui::IOmniGraphUi instantiation");
            }
            return created;
        }));

    return omni::python::PyBind<omni::graph::ui::IOmniGraphUi>::bind(boundClass);
}
|
omniverse-code/kit/include/omni/graph/ui/IOmniGraphUi.gen.h | // Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Generated wrapper for the functionality-free IOmniGraphUi marker interface;
//! only provides plugin-interface plumbing.
template <>
class omni::core::Generated<omni::graph::ui::IOmniGraphUi_abi> : public omni::graph::ui::IOmniGraphUi_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::ui::IOmniGraphUi")
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/image/unstable/any.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <typeinfo>
#include <type_traits>
#include <utility>
namespace omni
{
namespace graph
{
namespace image
{
namespace unstable
{
namespace cpp17
{
// The class describes a type-safe container of a single value of any copy-constructible type.
// This class is a not quite standards conformant implementation of std::any.
// It does not support everything std::any supports, and the API is limited to
// a subset that is actually used currently in the project.
// For example, it is missing constructors using std::in_place_type_t<ValueType>
// disambiguation tags and std::make_any. Additionally, this implementation *does not throw exceptions*.
// Instead, it asserts and logs errors when casts fail.
// The long term intention is we will move to a C++17 compiler, and import the std
// version of this class, removing this code from our codebase. Therefore it is very important that this class
// doesn't do anything that the std can't, though the opposite is permissible.
class any final
{
public:
    //! Constructs an empty object (has_value() == false).
    any()
        : vtable(nullptr)
    {
    }

    //! Copy constructor: copies rhs's contained value, if any, via the vtable.
    any(const any& rhs)
        : vtable(rhs.vtable)
    {
        if (rhs.has_value())
        {
            rhs.vtable->copy(rhs.storage, this->storage);
        }
    }

    //! Move constructor: transfers rhs's contained value and leaves rhs empty.
    any(any&& rhs) noexcept
        : vtable(rhs.vtable)
    {
        if (rhs.has_value())
        {
            rhs.vtable->move(rhs.storage, this->storage);
            rhs.vtable = nullptr;
        }
    }

    //! Destroys the contained value, if any.
    ~any()
    {
        this->reset();
    }

    //! Constructs an object holding a decayed copy of @p value. The enable_if
    //! excludes `any` itself so this does not hijack the copy/move constructors.
    template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
    any(ValueType&& value)
    {
        static_assert(std::is_copy_constructible<
            typename std::decay<ValueType>::type>::value,
            "T needs to be copy constructible");
        this->construct(std::forward<ValueType>(value));
    }

    //! Copy assignment via the copy-and-swap idiom.
    any& operator=(const any& rhs)
    {
        any(rhs).swap(*this);
        return *this;
    }

    //! Move assignment via the move-and-swap idiom.
    any& operator=(any&& rhs) noexcept
    {
        any(std::move(rhs)).swap(*this);
        return *this;
    }

    //! Replaces the contained value with a decayed copy of @p value.
    template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
    any& operator=(ValueType&& value)
    {
        static_assert(std::is_copy_constructible<
            typename std::decay<ValueType>::type>::value,
            "T needs to be copy constructible");
        any(std::forward<ValueType>(value)).swap(*this);
        return *this;
    }

    //! Destroys the contained value, if any, and becomes empty.
    void reset() noexcept
    {
        if (has_value())
        {
            this->vtable->destroy(storage);
            this->vtable = nullptr;
        }
    }

    //! Returns true if a value is contained. A null vtable means "empty".
    bool has_value() const noexcept
    {
        return this->vtable != nullptr;
    }

    //! Returns typeid of the contained value, or typeid(void) when empty.
    const std::type_info& type() const noexcept
    {
        return !has_value() ? typeid(void) : this->vtable->type();
    }

    //! Exchanges the contained values of *this and @p rhs.
    void swap(any& rhs) noexcept
    {
        if (this->vtable != rhs.vtable)
        {
            // Different contained types: three-way move through a temporary,
            // updating each object's vtable alongside its storage.
            any tmp(std::move(rhs));
            rhs.vtable = this->vtable;
            if (this->vtable != nullptr)
            {
                this->vtable->move(this->storage, rhs.storage);
            }
            this->vtable = tmp.vtable;
            if (tmp.vtable != nullptr)
            {
                tmp.vtable->move(tmp.storage, this->storage);
                tmp.vtable = nullptr;
            }
        }
        else
        {
            // Same contained type (or both empty): delegate to the type's swap.
            if (this->vtable != nullptr)
            {
                this->vtable->swap(this->storage, rhs.storage);
            }
        }
    }

private:
    //! Storage: either an owning heap pointer or an in-place buffer big enough
    //! for two pointers (small-object optimization).
    union storage_union
    {
        using stack_storage_t = typename std::aligned_storage<2 * sizeof(void*), std::alignment_of<void*>::value>::type;

        void* dynamic;
        stack_storage_t stack;
    };

    //! Manually-built virtual table of type-erased operations; one static
    //! instance exists per contained type (see vtable_for_type).
    struct vtable_type
    {
        const std::type_info& (*type)() noexcept;
        void(*destroy)(storage_union&) noexcept;
        void(*copy)(const storage_union& src, storage_union& dest);
        void(*move)(storage_union& src, storage_union& dest) noexcept;
        void(*swap)(storage_union& lhs, storage_union& rhs) noexcept;
    };

    //! Operations for heap-allocated values (storage_union::dynamic).
    template<typename T>
    struct vtable_dynamic
    {
        static const std::type_info& type() noexcept
        {
            return typeid(T);
        }

        static void destroy(storage_union& storage) noexcept
        {
            delete reinterpret_cast<T*>(storage.dynamic);
        }

        static void copy(const storage_union& src, storage_union& dest)
        {
            dest.dynamic = new T(*reinterpret_cast<const T*>(src.dynamic));
        }

        static void move(storage_union& src, storage_union& dest) noexcept
        {
            // Pointer steal; src is left null so destroy() on it is harmless.
            dest.dynamic = src.dynamic;
            src.dynamic = nullptr;
        }

        static void swap(storage_union& lhs, storage_union& rhs) noexcept
        {
            std::swap(lhs.dynamic, rhs.dynamic);
        }
    };

    //! Operations for values stored in-place (storage_union::stack).
    template<typename T>
    struct vtable_stack
    {
        static const std::type_info& type() noexcept
        {
            return typeid(T);
        }

        static void destroy(storage_union& storage) noexcept
        {
            reinterpret_cast<T*>(&storage.stack)->~T();
        }

        static void copy(const storage_union& src, storage_union& dest)
        {
            new (&dest.stack) T(reinterpret_cast<const T&>(src.stack));
        }

        static void move(storage_union& src, storage_union& dest) noexcept
        {
            // Move-construct into dest, then destroy the moved-from source.
            new (&dest.stack) T(std::move(reinterpret_cast<T&>(src.stack)));
            destroy(src);
        }

        static void swap(storage_union& lhs, storage_union& rhs) noexcept
        {
            storage_union tmp_storage;
            move(rhs, tmp_storage);
            move(lhs, rhs);
            move(tmp_storage, lhs);
        }
    };

    //! True when T must live on the heap: it is too big or over-aligned for the
    //! in-place buffer, or its move constructor may throw (swap/move must be
    //! noexcept for in-place storage).
    template<typename T>
    struct requires_allocation :
        std::integral_constant<bool,
            !(std::is_nothrow_move_constructible<T>::value
              && sizeof(T) <= sizeof(storage_union::stack) &&
              std::alignment_of<T>::value <= std::alignment_of<
                  storage_union::stack_storage_t>::value)>
    {};

    //! Returns the per-type singleton vtable, choosing heap or in-place
    //! operations based on requires_allocation<T>.
    template<typename T>
    static vtable_type* vtable_for_type()
    {
        using VTableType = typename std::conditional<requires_allocation<T>::value, vtable_dynamic<T>, vtable_stack<T>>::type;
        static vtable_type table =
        {
            VTableType::type,
            VTableType::destroy,
            VTableType::copy, VTableType::move,
            VTableType::swap,
        };
        return &table;
    }

protected:
    // The pointer-form any_cast overloads need access to is_typed()/cast().
    template<typename T>
    friend const T* any_cast(const any* operand) noexcept;
    template<typename T>
    friend T* any_cast(any* operand) noexcept;

    //! Returns true if the contained type's typeid equals @p t.
    bool is_typed(const std::type_info& t) const
    {
        return is_same(this->type(), t);
    }

    static bool is_same(const std::type_info& a, const std::type_info& b)
    {
        return a == b;
    }

    //! Unchecked pointer to the contained value (const). Callers must verify
    //! the type first (see any_cast).
    template<typename T>
    const T* cast() const noexcept
    {
        return requires_allocation<typename std::decay<T>::type>::value ?
            reinterpret_cast<const T*>(storage.dynamic) :
            reinterpret_cast<const T*>(&storage.stack);
    }

    //! Unchecked pointer to the contained value (mutable).
    template<typename T>
    T* cast() noexcept
    {
        return requires_allocation<typename std::decay<T>::type>::value ?
            reinterpret_cast<T*>(storage.dynamic) :
            reinterpret_cast<T*>(&storage.stack);
    }

private:
    storage_union storage; // uninitialized when vtable == nullptr
    vtable_type* vtable;   // non-null iff a value is contained

    //! Heap path: allocate a T from @p value.
    template<typename ValueType, typename T>
    typename std::enable_if<requires_allocation<T>::value>::type
    do_construct(ValueType&& value)
    {
        storage.dynamic = new T(std::forward<ValueType>(value));
    }

    //! In-place path: placement-new a T into the stack buffer.
    template<typename ValueType, typename T>
    typename std::enable_if<!requires_allocation<T>::value>::type
    do_construct(ValueType&& value)
    {
        new (&storage.stack) T(std::forward<ValueType>(value));
    }

    //! Sets the vtable for decayed ValueType, then constructs the value.
    template<typename ValueType>
    void construct(ValueType&& value)
    {
        using T = typename std::decay<ValueType>::type;
        this->vtable = vtable_for_type<T>();
        do_construct<ValueType, T>(std::forward<ValueType>(value));
    }
};
namespace detail
{
//! Dispatch helper for any_cast(any&&): this overload (std::true_type) moves
//! the value out when ValueType is move-constructible and not an lvalue ref.
template<typename ValueType>
inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::true_type)
{
    return std::move(*p);
}

//! Copying fallback (std::false_type) used when the value cannot, or must not,
//! be moved out of the source object.
template<typename ValueType>
inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::false_type)
{
    return *p;
}
}
//! Casts a const `any` to ValueType by copy (or const reference).
//!
//! NOTE(review): unlike std::any_cast, a type mismatch does not throw
//! bad_any_cast (this implementation is deliberately exception-free, per the
//! class comment); it logs an error and then dereferences the null pointer,
//! which is undefined behavior -- presumably an intentional hard-fail; confirm.
template<typename ValueType>
inline ValueType any_cast(const any& operand)
{
    using T = typename std::add_const<typename std::remove_reference<ValueType>::type>::type;
    auto p = any_cast<T>(&operand);
    if (p == nullptr)
    {
        CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
    }
    return *p;
}
//! Casts a mutable `any` to ValueType.
//!
//! NOTE(review): on type mismatch this logs and then dereferences null (UB)
//! instead of throwing like std::any_cast -- see the const overload above.
template<typename ValueType>
inline ValueType any_cast(any& operand)
{
    using T = typename std::remove_reference<ValueType>::type;
    auto p = any_cast<T>(&operand);
    if (p == nullptr)
    {
        CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
    }
    return *p;
}
//! Casts an rvalue `any` to ValueType, moving the contained value out when
//! ValueType is move-constructible and not an lvalue reference (dispatched via
//! detail::any_cast_move_if_true).
//!
//! NOTE(review): on type mismatch this logs and then dereferences null (UB)
//! instead of throwing like std::any_cast -- see the const overload above.
template<typename ValueType>
inline ValueType any_cast(any&& operand)
{
    using can_move = std::integral_constant<bool,
        std::is_move_constructible<ValueType>::value
        && !std::is_lvalue_reference<ValueType>::value>;

    using T = typename std::remove_reference<ValueType>::type;
    auto p = any_cast<T>(&operand);
    if (p == nullptr)
    {
        CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
    }
    return detail::any_cast_move_if_true<ValueType>(p, can_move());
}
//! Pointer form, mirroring std::any_cast(const any*): returns a pointer to the
//! contained value when @p operand is non-null and holds exactly (decayed)
//! ValueType, otherwise nullptr. This overload never invokes UB on mismatch.
template<typename ValueType>
inline const ValueType* any_cast(const any* operand) noexcept
{
    using T = typename std::decay<ValueType>::type;

    if (operand && operand->is_typed(typeid(T)))
        return operand->cast<ValueType>();
    return nullptr;
}
//! Mutable pointer form, mirroring std::any_cast(any*): returns a pointer to
//! the contained value on an exact (decayed) type match, otherwise nullptr.
template<typename ValueType>
inline ValueType* any_cast(any* operand) noexcept
{
    using T = typename std::decay<ValueType>::type;

    if (operand && operand->is_typed(typeid(T)))
        return operand->cast<ValueType>();
    return nullptr;
}
} // namespace cpp17
} // namespace unstable
} // namespace image
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/image/unstable/ComputeParamsBuilder.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//
// This ABI is unstable and subject to change
/* _ _ _____ ______ _______ __ ______ _ _ _____ ______ ___ _ _____ _____ _____ _ __
| | | |/ ____| ____| /\|__ __| \ \ / / __ \| | | | __ \ / __ \ \ / / \ | | | __ \|_ _|/ ____| |/ /
| | | | (___ | |__ / \ | | \ \_/ / | | | | | | |__) | | | | \ \ /\ / /| \| | | |__) | | | | (___ | ' /
| | | |\___ \| __| / /\ \ | | \ /| | | | | | | _ / | | | |\ \/ \/ / | . ` | | _ / | | \___ \| <
| |__| |____) | |____ / ____ \| | | | | |__| | |__| | | \ \ | |__| | \ /\ / | |\ | | | \ \ _| |_ ____) | . \
\____/|_____/|______| /_/ \_\_| |_| \____/ \____/|_| \_\ \____/ \/ \/ |_| \_| |_| \_\_____|_____/|_|\_|
*/
#pragma once
#include <carb/cudainterop/CudaInterop.h>
#include <carb/renderer/Renderer.h>
#include <omni/fabric/IToken.h>
#include <omni/graph/core/GpuInteropEntryUserData.h>
#include <omni/graph/core/ogn/Database.h>
#include <omni/kit/renderer/IGpuFoundation.h>
#include <rtx/rendergraph/RenderGraphBuilder.h>
#include <rtx/rendergraph/RenderGraphTypes.h>
#include <rtx/resourcemanager/ResourceManager.h>
#include <rtx/resourcemanager/ResourceManagerTypes.h>
#include <rtx/hydra/HydraRenderResults.h>
#include <vector>
#include <tuple>
#include <unordered_map>
#include "any.h"
namespace omni
{
namespace graph
{
namespace image
{
namespace unstable
{
/**
* @brief Structure for holding arbitrary parameters.
*
* The ComputeParams class is used to hold and access arbitrary parameters of various types.
* It allows adding parameters with a specified key and retrieving parameters by their key and type.
*
* Example usage:
* ComputeParams<std::string> params;
* params.add("param1", 42);
* params.add("param2", "hello");
* params.add("param3", 3.14);
*
* int param1Value = params.get<int>("param1");
* std::string param2Value = params.get<std::string>("param2");
* double param3Value = params.get<double>("param3");
*/
template <typename TKey>
class ComputeParams
{
public:
    /**
     * @brief Constructor.
     *
     * @param[in] initialCapacity The initial capacity of the container where the parameters are stored.
     */
    explicit ComputeParams(std::size_t initialCapacity = 32)
    {
        m_data.reserve(initialCapacity);
    }

    /**
     * @brief Adds a new entry in the parameter map.
     * If an element with the given key is already in the container, it is replaced.
     *
     * @param[in] key The unique identifier of a parameter value.
     * @param[in] value The value of the parameter.
     */
    template <typename T>
    void add(TKey const& key, T&& value)
    {
        m_data[key] = std::forward<T>(value);
    }

    /**
     * @brief Gets a value from the parameter map.
     *
     * The return type must match the type of the value stored for that key. If the type of the stored value does not match the requested type,
     * the behavior is that of cpp17::any_cast on a mismatched type (logs an error; see any.h).
     *
     * @param[in] key The unique identifier of the parameter.
     * @return Returns the value of the specified type.
     * @exception std::out_of_range if there is no data for the given key
     */
    template <typename T>
    T const& get(TKey const& key) const
    {
        return cpp17::any_cast<T const&>(m_data.at(key));
    }

    /**
     * @brief Gets a value from the parameter map.
     *
     * If there is no value for the given key, or if the value type is different from the requested type, returns nullptr.
     *
     * @param[in] key The unique identifier of the parameter.
     * @return Returns a pointer to the value of the specified type, or nullptr.
     */
    template<typename T>
    T const* tryGet(TKey const& key) const noexcept
    {
        // Single hash lookup; the previous find() + at() pair hashed the key twice.
        auto const it = m_data.find(key);
        if (it != m_data.end())
        {
            return cpp17::any_cast<T const>(&it->second);
        }
        return nullptr;
    }

    /**
     * @brief Checks if a key is present in the container.
     *
     * @param[in] key The unique identifier of the parameter.
     * @return Returns true if the key is found, otherwise returns false.
     */
    bool hasKey(TKey const& key) const noexcept
    {
        return m_data.find(key) != m_data.end();
    }

private:
    // Type-erased parameter storage; cpp17::any (any.h) stands in for std::any
    // until the project moves to a C++17 standard library.
    std::unordered_map<TKey, cpp17::any> m_data;
};
/**
* @brief A builder class for constructing instances of the ComputeParams class.
*
* The ComputeParamsBuilder provides a fluent interface for building ComputeParams objects.
* It allows setting multiple parameters of different types and creates a ComputeParams object
* with the provided parameter values. The object is intended to be used from Omnigraph nodes.
*
* Example usage:
* ComputeParams<std::string> params = ComputeParamsBuilder<std::string>{gpu, rp, db}
* .addValue("param1", 42)
* .addValue("param2", "hello")
* .addValue("param3", 3.14)
* .build();
*
 * The main purpose for this builder is to facilitate the scheduling of CUDA tasks. For this purpose,
* the builder provides some specialized APIs for adding input AOVs, for allocating new AOVs and for
* scheduling the work on the GPU.
*
* Once built, the ComputeParams instance can be passed to the CUDA task using the scheduleCudaTask function.
*
* Alternatively, the builder can directly build the params and schedule the CUDA task in the same chain of method calls.
*
* Example usage for scheduling CUDA tasks:
* ComputeParamsBuilder<std::string>{ gpu, rp, db }
* .addValue("multiplier", db.inputs.multiplier())
* .addInputTexture("inputAOV", db.inputs.inputAOV(),
* [](cudaMipmappedArray_t cudaPtr, carb::graphics::TextureDesc const* desc, ComputeParams<std::string>& params)
* {
* params.add("width", desc->width);
* params.add("height", desc->height);
* })
* .addOutputTexture("outputAOV", db.inputs.outputAOV(), db.inputs.width(), db.inputs.height(), "TestTexture")
* .scheduleCudaTask("TestCudaTask",
* [](ComputeParams<std::string>* data, cudaStream_t stream)
* {
* auto multiplier = data->get<float>("multiplier");
* auto inputAOV = data->get<cudaMipmappedArray_t>("inputAOV");
* auto outputAOV = data->get<cudaMipmappedArray_t>("outputAOV");
* auto width = data->get<uint32_t>("width");
* auto height = data->get<uint32_t>("height");
*
* // ... call CUDA kernel
* });
*
 * Note: after building the ComputeParams or scheduling the CUDA task, the ComputeParamsBuilder instance cannot be modified anymore.
* This restriction is imposed in order to provide the guarantee that the AOV pointers built by the builder are not invalidated by further
* modifications of the render product through the builder API.
*/
template<typename TKey>
class ComputeParamsBuilder
{
public:
    /**
     * @brief Callback invoked after extracting a texture AOV with a given token from the Render Product.
     * Allows adding additional parameters from the TextureDesc of the AOV, such as the width, height, etc.
     */
    using TextureDescCallback = std::function<void(cudaMipmappedArray_t, carb::graphics::TextureDesc const*, ComputeParams<TKey>&)>;
    /**
     * @brief Callback invoked after extracting a buffer AOV with a given token from the Render Product.
     * Allows adding additional parameters from the BufferDesc of the AOV, such as the bufferSize.
     */
    using BufferDescCallback = std::function<void(cudaMipmappedArray_t, carb::graphics::BufferDesc const*, ComputeParams<TKey>&)>;
    /**
     * @brief Callback invoked by the builder to explicitly allocate an AOV.
     * Allows explicit control over the parameters of the new AOV.
     */
    using AllocateAOVCallback = std::function<cudaMipmappedArray_t(ComputeParams<TKey> const&,
                                                                   omni::graph::core::GpuFoundationsInterfaces*,
                                                                   omni::usd::hydra::HydraRenderProduct*,
                                                                   rtx::resourcemanager::SyncScopeId,
                                                                   uint32_t)>;
    /**
     * @brief Callback invoked by the builder after the allocation of a new AOV.
     * Allows setting fabric attributes of the node.
     */
    using PostAllocateAOVCallback = std::function<void(cudaMipmappedArray_t)>;
private:
    //! Discriminates the union in AOVParams and selects the extraction/allocation path in build().
    enum class AOVType
    {
        Buffer,
        Texture
    };
    //! Deferred description of one AOV registered with the builder; resolved in build().
    template <typename T>
    struct AOVParams
    {
        AOVType aovType;                //!< Selects which union member below is active.
        T key;                          //!< Parameter key under which the resolved pointer is stored.
        omni::fabric::TokenC aovToken;  //!< Token identifying the render var in the Render Product.
        union
        {
            carb::graphics::TextureDesc textureDesc;
            carb::graphics::BufferDesc bufferDesc;
        };
        //cpp17::any callback; // cpp17::any does not seem to work with lambdas. std::any should work, but can't use it for now
        // TODO: find a better way to define the callbacks
        TextureDescCallback inputTextureCb;
        BufferDescCallback inputBufferCb;
        AllocateAOVCallback allocateAOVCb;
        PostAllocateAOVCallback postAllocateCb;
    };
public:
    /**
     * @brief Constructor.
     *
     * @param[in] gpu The GPU interface.
     * @param[in] rp The render product on which the CUDA computation is applied.
     * @param[in] db The node database.
     * @param[in] initialCapacity The initial capacity of the container where the parameters are stored.
     */
    ComputeParamsBuilder(
        omni::graph::core::GpuFoundationsInterfaces* gpu,
        omni::usd::hydra::HydraRenderProduct* rp,
        omni::graph::core::ogn::OmniGraphDatabase& db,
        std::size_t initialCapacity = 32)
        : m_gpu(gpu)
        , m_rp(rp)
        , m_db(db)
        , m_data(initialCapacity)
        , m_deviceIndex(s_invalidDeviceIndex)
        , m_buildError(BuildError::NoError)
    {
        m_inputAOVs.reserve(initialCapacity);
        m_outputAOVs.reserve(initialCapacity);
        m_outputAOVTokens.reserve(initialCapacity);
    }
    /**
     * @brief Set a parameter value of type T with the specified key.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] value The parameter value.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    template<typename TValue>
    ComputeParamsBuilder&& addValue(TKey const& key, TValue&& value) &&
    {
        // Forward so that r-value arguments can be moved into storage instead of copied.
        m_data.add(key, std::forward<TValue>(value));
        return std::move(*this);
    }
    /**
     * @brief Add a texture AOV. The AOV is expected to be already allocated.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] cb Callback which can be used to add additional parameters from the texture description of the AOV.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addInputTexture(TKey const& key, omni::fabric::TokenC aovToken, TextureDescCallback const& cb = nullptr) &&
    {
        m_inputAOVs.emplace_back(AOVParams<TKey>
        {
            /*.aovType =*/ AOVType::Texture,
            /*.key =*/ key,
            /*.aovToken =*/ aovToken
        });
        m_inputAOVs.back().inputTextureCb = cb;
        return std::move(*this);
    }
    /**
     * @brief Add a buffer AOV. The AOV is expected to be already allocated.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] cb Callback which can be used to add additional parameters from the buffer description of the AOV.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addInputBuffer(TKey const& key, omni::fabric::TokenC aovToken, BufferDescCallback const& cb = nullptr) &&
    {
        m_inputAOVs.emplace_back(AOVParams<TKey>
        {
            /*.aovType =*/ AOVType::Buffer,
            /*.key =*/ key,
            /*.aovToken =*/ aovToken
        });
        m_inputAOVs.back().inputBufferCb = cb;
        return std::move(*this);
    }
    /**
     * @brief Allocates a new texture AOV which will be filled in the CUDA task.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] width The width of the allocated texture.
     * @param[in] height The height of the allocated texture.
     * @param[in] format The texture format.
     * @param[in] debugName A string used to identify the new AOV in the debugger.
     * @param[in] postAllocateCb A callback which allows the binding of the new AOV to an output attribute of a node.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addOutputTexture(
        TKey const& key,
        omni::fabric::TokenC aovToken,
        uint32_t width,
        uint32_t height,
        carb::graphics::Format format,
        const char* debugName,
        PostAllocateAOVCallback postAllocateCb = nullptr) &&
    {
        m_outputAOVTokens.emplace_back(aovToken);
        m_outputAOVs.emplace_back(AOVParams<TKey>
        {
            /*.aovType =*/ AOVType::Texture,
            /*.key =*/ key,
            /*.aovToken =*/ aovToken
        });
        m_outputAOVs.back().textureDesc =
            carb::graphics::TextureDesc{ carb::graphics::TextureType::e2D,
                                         carb::graphics::kTextureUsageFlagShaderResourceStorage |
                                             carb::graphics::kTextureUsageFlagShaderResource |
                                             carb::graphics::kTextureUsageFlagExportShared,
                                         width,
                                         height,
                                         1,
                                         1,
                                         format,
                                         carb::graphics::SampleCount::e1x,
                                         { { 0, 0, 0, 0 }, nullptr },
                                         debugName,
                                         nullptr };
        m_outputAOVs.back().postAllocateCb = postAllocateCb;
        return std::move(*this);
    }
    /**
     * @brief Allocates a new texture AOV which will be filled in the CUDA task.
     *
     * Allows explicit definition and initialization of the AOV.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] callback The callback where the initialization of the AOV must be done.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addOutputTexture(
        TKey const& key,
        omni::fabric::TokenC aovToken,
        AllocateAOVCallback callback) &&
    {
        m_outputAOVTokens.emplace_back(aovToken);
        m_outputAOVs.emplace_back(AOVParams<TKey>
        {
            /*.aovType =*/ AOVType::Texture,
            /*.key =*/ key,
            /*.aovToken =*/ aovToken
        });
        m_outputAOVs.back().allocateAOVCb = callback;
        return std::move(*this);
    }
    /**
     * @brief Allocates a new buffer AOV which will be filled in the CUDA task.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] bufferSize The size of the allocated buffer.
     * @param[in] debugName A string used to identify the new AOV in the debugger.
     * @param[in] postAllocateCb A callback which allows the binding of the new AOV to an output attribute of a node.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addOutputBuffer(
        TKey const& key,
        omni::fabric::TokenC aovToken,
        uint32_t bufferSize,
        const char* debugName,
        PostAllocateAOVCallback postAllocateCb = nullptr) &&
    {
        m_outputAOVTokens.emplace_back(aovToken);
        m_outputAOVs.emplace_back(AOVParams<TKey>
        {
            /*.aovType =*/ AOVType::Buffer,
            /*.key =*/ key,
            /*.aovToken =*/ aovToken,
        });
        m_outputAOVs.back().bufferDesc = carb::graphics::BufferDesc
        {
            carb::graphics::kBufferUsageFlagExportShared,
            bufferSize,
            debugName,
            nullptr
        };
        m_outputAOVs.back().postAllocateCb = postAllocateCb;
        return std::move(*this);
    }
    /**
     * @brief Allocates a new buffer AOV which will be filled in the CUDA task.
     *
     * Allows explicit definition and initialization of the AOV.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] callback The callback where the initialization of the AOV must be done.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addOutputBuffer(
        TKey const& key,
        omni::fabric::TokenC aovToken,
        AllocateAOVCallback callback) &&
    {
        m_outputAOVTokens.emplace_back(aovToken);
        m_outputAOVs.emplace_back(AOVParams<TKey>
        {
            /*.aovType =*/ AOVType::Buffer,
            /*.key =*/ key,
            /*.aovToken =*/ aovToken
        });
        m_outputAOVs.back().allocateAOVCb = callback;
        return std::move(*this);
    }
    /**
     * @brief Builds the final ComputeParams structure.
     *
     * The following steps are performed when building the final params, in this order:
     * 1) The new AOVs are added to the Render Product
     * 2) The input AOVs (already allocated) are extracted from the Render Product
     * 3) The new AOVs are allocated
     *
     * This sequence ensures that all the AOV pointers in the resulting ComputeParams structure are valid.
     *
     * No further values can be added to the builder after this call.
     *
     * @return Returns the constructed ComputeParams object.
     */
    ComputeParams<TKey> build()
    {
        if (!isValid())
            return m_data;
        // append the outputs first to avoid further structural changes which will invalidate the AOV pointers
        if (!m_outputAOVTokens.empty())
            appendUninitializedRenderVars(m_rp, m_outputAOVTokens);
        auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(m_gpu->resourceManagerContext);
        auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(m_gpu->resourceManager);
        // Resolve the already-allocated input AOVs; missing/invalid render vars are skipped with a warning.
        for (auto const& aovParams : m_inputAOVs)
        {
            auto aovPtr = omni::usd::hydra::getRenderVarFromProduct(m_rp, aovParams.aovToken.token);
            if (aovPtr == nullptr)
            {
                m_db.logWarning("Missing RenderVar %s", m_db.tokenToString(aovParams.aovToken));
                continue;
            }
            if (aovPtr->resource == nullptr)
            {
                m_db.logWarning("RenderVar %s is an invalid resource.", m_db.tokenToString(aovParams.aovToken));
                continue;
            }
            // All inputs must live on the same device; the first resolved input pins the device index.
            const uint32_t deviceIndex = rm->getFirstDeviceIndex(*rmCtx, *aovPtr->resource);
            if (m_deviceIndex != deviceIndex)
            {
                if (m_deviceIndex == s_invalidDeviceIndex)
                {
                    m_deviceIndex = deviceIndex;
                }
                else
                {
                    m_db.logWarning("RenderVar %s has an inconsistent device index (%lu/%lu).",
                                    m_db.tokenToString(aovParams.aovToken), static_cast<unsigned long>(deviceIndex),
                                    static_cast<unsigned long>(m_deviceIndex));
                    m_buildError = BuildError::InconsistentDeviceIndex;
                    break;
                }
            }
            switch (aovParams.aovType)
            {
            case AOVType::Texture:
            {
                auto cudaPtr = (cudaMipmappedArray_t)rm->getCudaMipmappedArray(*aovPtr->resource, m_deviceIndex);
                if (aovParams.inputTextureCb)
                {
                    const auto textureDesc = rm->getTextureDesc(*rmCtx, aovPtr->resource);
                    aovParams.inputTextureCb(cudaPtr, textureDesc, m_data);
                }
                m_data.add(aovParams.key, cudaPtr);
                break;
            }
            case AOVType::Buffer:
            {
                // NOTE(review): buffers are also extracted via getCudaMipmappedArray, while
                // allocateRenderVarBuffer returns getCudaDevicePointer — confirm this asymmetry is intended.
                auto cudaPtr = (cudaMipmappedArray_t)rm->getCudaMipmappedArray(*aovPtr->resource, m_deviceIndex);
                if (aovParams.inputBufferCb)
                {
                    const auto bufferDesc = rm->getBufferDesc(aovPtr->resource);
                    aovParams.inputBufferCb(cudaPtr, bufferDesc, m_data);
                }
                m_data.add(aovParams.key, cudaPtr);
                break;
            }
            }
        }
        if (m_buildError != BuildError::NoError)
        {
            return m_data;
        }
        // the device index is not set, get the index of the first device render variable
        if (m_deviceIndex == s_invalidDeviceIndex)
        {
            for (uint32_t i = 0; i < m_rp->renderVarCnt; i++)
            {
                if (m_rp->vars[i].isRpResource)
                {
                    m_deviceIndex = rm->getFirstDeviceIndex(*rmCtx, *m_rp->vars[i].resource);
                    break;
                }
            }
            // the render product has no device render variable, use the render product device index
            if (m_deviceIndex == s_invalidDeviceIndex)
            {
                m_deviceIndex = carb::graphics::DeviceMask(m_rp->deviceMask).getFirstIndex();
            }
        }
        auto iRenderGraph = reinterpret_cast<gpu::rendergraph::IRenderGraph*>(m_gpu->renderGraph);
        auto rgBuilder = reinterpret_cast<rtx::rendergraph::RenderGraphBuilder*>(m_gpu->renderGraphBuilder);
        auto renderGraph = iRenderGraph->getRenderGraph(m_deviceIndex);
        const auto syncScopeId = rgBuilder->getRenderGraphDesc(*renderGraph).syncScopeId;
        // Allocate the output AOVs that were appended (uninitialized) to the render product above.
        for (auto const& aovParams : m_outputAOVs)
        {
            cudaMipmappedArray_t ptr = nullptr;
            if (aovParams.allocateAOVCb != nullptr)
            {
                // custom AOV allocation
                auto cb = aovParams.allocateAOVCb;
                ptr = cb(m_data, m_gpu, m_rp, syncScopeId, m_deviceIndex);
            }
            else
            {
                // standard AOV allocation
                switch (aovParams.aovType)
                {
                case AOVType::Texture:
                    ptr = (cudaMipmappedArray_t)allocateRenderVarTexture(
                        m_gpu, m_rp, syncScopeId, m_deviceIndex, aovParams.aovToken, aovParams.textureDesc);
                    break;
                case AOVType::Buffer:
                    ptr = (cudaMipmappedArray_t)allocateRenderVarBuffer(
                        m_gpu, m_rp, syncScopeId, m_deviceIndex, aovParams.aovToken, aovParams.bufferDesc);
                    break;
                default:
                    break;
                }
            }
            m_data.add(aovParams.key, ptr);
            if (aovParams.postAllocateCb != nullptr)
                aovParams.postAllocateCb(ptr);
        }
        return m_data;
    }
    /**
     * @brief Builds the final ComputeParams structure and schedules the CUDA task.
     *
     * No further values can be added to the builder after this call.
     *
     * @param[in] renderOpName The name of the render op in the render graph.
     * @param[in] computeCuda The entry point to the CUDA computation kernel.
     * @return Returns true if the builder is valid and the CUDA task was scheduled, otherwise returns false.
     */
    bool scheduleCudaTask(const char* renderOpName,
                          void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream)) &&;
    /**
     * @brief Builds the final ComputeParams structure and schedules the CUDA task.
     *
     * Allows validation of the ComputeParams before scheduling the CUDA task.
     * No further values can be added to the builder after this call.
     *
     * @param[in] renderOpName The name of the render op in the render graph.
     * @param[in] computeCuda The entry point to the CUDA computation kernel.
     * @param[in] validateCb A callback to validate the parameters before scheduling the CUDA task.
     * @return Returns true if the builder is valid, the params are validated by the user callback
     *         and the CUDA task was scheduled, otherwise returns false.
     */
    bool scheduleCudaTask(
        const char* renderOpName,
        void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream),
        bool (*validateCb)(ComputeParams<TKey> const& params)
    ) &&;
private:
    //! Returns false if the builder was constructed with null inputs or a previous build step failed.
    bool isValid() const
    {
        if (!m_gpu || !m_rp || (m_buildError != BuildError::NoError))
        {
            CARB_LOG_WARN_ONCE("ComputeParamsBuilder: invalid RenderProduct inputs");
            return false;
        }
        return true;
    }
    //! Appends placeholder (resource == nullptr) render vars for every token not already present in the product.
    //! Grows rp->vars exactly once so later AOV pointer lookups remain stable.
    static void appendUninitializedRenderVars(omni::usd::hydra::HydraRenderProduct* rp, const std::vector<omni::fabric::TokenC>& renderVarTokens)
    {
        using TokenC = omni::fabric::TokenC;
        // filter already existing aovs
        std::vector<TokenC> filteredRenderVarTokens;
        filteredRenderVarTokens.reserve(renderVarTokens.size());
        for (const auto token : renderVarTokens)
        {
            if (!omni::usd::hydra::getRenderVarFromProduct(rp, token.token))
            {
                filteredRenderVarTokens.emplace_back(token);
            }
        }
        // Nothing new to append: avoid reallocating rp->vars, which would needlessly
        // invalidate pointers into the existing render var array.
        if (filteredRenderVarTokens.empty())
        {
            return;
        }
        using namespace omni::usd::hydra;
        const size_t numRenderVars = filteredRenderVarTokens.size();
        HydraRenderVar* newVars = new HydraRenderVar[rp->renderVarCnt + numRenderVars];
        const size_t varArraySize = sizeof(HydraRenderVar) * rp->renderVarCnt;
        std::memcpy(newVars, rp->vars, varArraySize);
        for (size_t i = 0; i < numRenderVars; ++i)
        {
            newVars[rp->renderVarCnt + i].aov = filteredRenderVarTokens[i].token;
            newVars[rp->renderVarCnt + i].isRpResource = true;
            newVars[rp->renderVarCnt + i].resource = nullptr;
            newVars[rp->renderVarCnt + i].isBufferRpResource = true;
            newVars[rp->renderVarCnt + i].isFrameLifetimeRsrc = false;
        }
        delete[] rp->vars;
        rp->vars = newVars;
        rp->renderVarCnt += static_cast<uint32_t>(numRenderVars);
    }
    //! Allocates the backing resource for a buffer render var previously appended by
    //! appendUninitializedRenderVars and returns its CUDA device pointer (0 on failure).
    static uint64_t allocateRenderVarBuffer(omni::graph::core::GpuFoundationsInterfaces* gpu,
                                            omni::usd::hydra::HydraRenderProduct* rp,
                                            rtx::resourcemanager::SyncScopeId syncScopeId,
                                            uint32_t deviceIndex,
                                            omni::fabric::TokenC deviceRenderVarToken,
                                            carb::graphics::BufferDesc const& buffDesc)
    {
        auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(gpu->resourceManagerContext);
        auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(gpu->resourceManager);
        using namespace carb::graphics;
        auto deviceRenderVar = omni::usd::hydra::getRenderVarFromProduct(rp, deviceRenderVarToken.token);
        CARB_ASSERT(deviceRenderVar && deviceRenderVar->isRpResource && deviceRenderVar->isBufferRpResource);
        if (!deviceRenderVar || !deviceRenderVar->isRpResource || !deviceRenderVar->isBufferRpResource)
        {
            return 0;
        }
        const rtx::resourcemanager::ResourceDesc resourceDesc = { rtx::resourcemanager::ResourceMode::ePooled,
                                                                  MemoryLocation::eDevice,
                                                                  rtx::resourcemanager::ResourceCategory::eOtherBuffer,
                                                                  rtx::resourcemanager::kResourceUsageFlagCudaShared,
                                                                  DeviceMask::getDeviceMaskFromIndex(deviceIndex),
                                                                  deviceIndex,
                                                                  syncScopeId };
        CARB_ASSERT(!deviceRenderVar->resource);
        if (!deviceRenderVar->resource)
        {
            deviceRenderVar->resource = buffDesc.size > 0 ? rm->getResourceFromBufferDesc(*rmCtx, buffDesc, resourceDesc) : nullptr;
            deviceRenderVar->isFrameLifetimeRsrc = true;
        }
        auto cudaDevicePointer =
            deviceRenderVar->resource ? rm->getCudaDevicePointer(*deviceRenderVar->resource, deviceIndex) : nullptr;
        return reinterpret_cast<uint64_t>(cudaDevicePointer);
    }
    //! Allocates the backing resource for a texture render var previously appended by
    //! appendUninitializedRenderVars and returns its CUDA pointer (0 on failure).
    static uint64_t allocateRenderVarTexture(omni::graph::core::GpuFoundationsInterfaces* gpu,
                                             omni::usd::hydra::HydraRenderProduct* rp,
                                             rtx::resourcemanager::SyncScopeId syncScopeId,
                                             uint32_t deviceIndex,
                                             omni::fabric::TokenC deviceRenderVarToken,
                                             carb::graphics::TextureDesc const& texDesc)
    {
        auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(gpu->resourceManagerContext);
        auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(gpu->resourceManager);
        using namespace carb::graphics;
        auto deviceRenderVar = omni::usd::hydra::getRenderVarFromProduct(rp, deviceRenderVarToken.token);
        // NOTE(review): the isBufferRpResource check holds only because freshly appended vars start as
        // buffers (see appendUninitializedRenderVars); it is flipped to false below once allocated.
        CARB_ASSERT(deviceRenderVar && deviceRenderVar->isRpResource && deviceRenderVar->isBufferRpResource);
        if (!deviceRenderVar || !deviceRenderVar->isRpResource || !deviceRenderVar->isBufferRpResource)
        {
            return 0;
        }
        const rtx::resourcemanager::ResourceDesc resDesc =
        {
            rtx::resourcemanager::ResourceMode::ePooled,
            carb::graphics::MemoryLocation::eDevice,
            rtx::resourcemanager::ResourceCategory::eOtherTexture,
            rtx::resourcemanager::kResourceUsageFlagCudaShared,
            carb::graphics::DeviceMask::getDeviceMaskFromIndex(deviceIndex),
            deviceIndex,
            syncScopeId
        };
        deviceRenderVar->resource = rm->getResourceFromTextureDesc(*rmCtx, texDesc, resDesc);
        deviceRenderVar->isBufferRpResource = false;
        deviceRenderVar->isFrameLifetimeRsrc = true;
        auto cudaDevicePointer = deviceRenderVar->resource ? rm->getCudaDevicePointer(*deviceRenderVar->resource, deviceIndex) : nullptr;
        return reinterpret_cast<uint64_t>(cudaDevicePointer);
    }
protected:
    omni::graph::core::GpuFoundationsInterfaces* m_gpu;      //!< GPU foundation interfaces (not owned).
    omni::usd::hydra::HydraRenderProduct* m_rp;              //!< Target render product (not owned).
    omni::graph::core::ogn::OmniGraphDatabase& m_db;         //!< Node database, used for logging and token lookup.
    std::vector<AOVParams<TKey>> m_inputAOVs;                //!< AOVs to extract from the product in build().
    std::vector<AOVParams<TKey>> m_outputAOVs;               //!< AOVs to allocate in build().
    std::vector<omni::fabric::TokenC> m_outputAOVTokens;     //!< Tokens of the output AOVs to append to the product.
    ComputeParams<TKey> m_data;                              //!< Accumulated parameter set returned by build().
    uint32_t m_deviceIndex;                                  //!< Device shared by all AOVs; s_invalidDeviceIndex until resolved.
    //! Error state latched during build(); a non-NoError value makes isValid() return false.
    enum class BuildError
    {
        NoError,
        InconsistentDeviceIndex
    } m_buildError;
    static constexpr uint32_t s_invalidDeviceIndex = std::numeric_limits<uint32_t>::max();
};
namespace
{
// Temporary structure for passing the params and the computeCuda callback to the cudaInterop lambda.
// Allocated with new by scheduleCudaTask below and deleted by the interop callback once the kernel
// entry point has run, so its lifetime spans the asynchronous render-graph execution.
// NOTE(review): this is an unnamed namespace inside a header; every including TU gets its own copy
// of UserData (C++ Core Guidelines SF.21 discourages this) — consider a named detail namespace.
// NOTE(review): members are positionally aggregate-initialized by scheduleCudaTask; do not reorder.
template <typename TParams>
struct UserData
{
    ComputeParams<TParams>* params;
    void (*computeCuda)(ComputeParams<TParams>* data, cudaStream_t stream);
};
} // namespace
/**
 * @brief Schedule a CUDA task on the post render graph.
 *
 * Ownership of a copy of @p computeParams is transferred to the render op; the copy is
 * deleted by the interop callback after @p computeCuda has been invoked.
 *
 * @param[in] gpu The GPU interface.
 * @param[in] rp The render product on which the CUDA computation is applied.
 * @param[in] deviceIndex The index of the device whose render graph receives the task.
 * @param[in] computeParams The parameters of the computation.
 * @param[in] renderOpName The name of the render op in the render graph.
 * @param[in] computeCuda The CUDA computation entry point.
 */
template <typename TParams>
inline void scheduleCudaTask(omni::graph::core::GpuFoundationsInterfaces* gpu,
                             omni::usd::hydra::HydraRenderProduct* rp,
                             uint32_t deviceIndex,
                             ComputeParams<TParams> const& computeParams,
                             const char* renderOpName,
                             void (*computeCuda)(ComputeParams<TParams>* data, cudaStream_t stream))
{
    CARB_ASSERT(gpu);
    CARB_ASSERT(rp);
    CARB_ASSERT(computeCuda);
    auto iRenderGraph = reinterpret_cast<gpu::rendergraph::IRenderGraph*>(gpu->renderGraph);
    auto rgBuilder = reinterpret_cast<rtx::rendergraph::RenderGraphBuilder*>(gpu->renderGraphBuilder);
    auto renderGraph = iRenderGraph->getRenderGraph(deviceIndex);
    // An explicit copy: the params must outlive this call until the async callback runs.
    // (The previous std::move on a const reference silently copied anyway.)
    auto computeParamsPtr = new ComputeParams<TParams>(computeParams);
    // cudaData (and the params copy) are deleted by the interop callback below.
    auto cudaData = new UserData<TParams>{ computeParamsPtr, computeCuda };
    const rtx::rendergraph::ParamBlockRefs paramBlockRefs{ 0, {} };
    rtx::rendergraph::RenderOpParams* renderOpParams = rgBuilder->createParams(*renderGraph, paramBlockRefs);
    rtx::rendergraph::addRenderOpLambdaEx(
        *rgBuilder, *renderGraph, renderOpName, renderOpParams, rtx::rendergraph::kRenderOpFlagNoAnnotation,
        [cudaData](rtx::rendergraph::RenderOpInputCp renderOpInput)
        {
            renderOpInput->graphicsMux->cmdCudaInterop(
                renderOpInput->commandList,
                [](cudaStream_t cudaStream, void* userData) -> void
                {
                    auto cudaData = reinterpret_cast<UserData<TParams>*>(userData);
                    auto params = cudaData->params;
                    auto computeCuda = cudaData->computeCuda;
                    computeCuda(params, cudaStream);
                    delete params;
                    delete cudaData;
                },
                cudaData, carb::graphicsmux::CudaInteropFlags::eNone);
        });
}
//! Builds the params and schedules the CUDA task on the post render graph.
//! Returns false without scheduling when the builder is invalid, including when
//! build() itself flags an error (e.g. an inconsistent device index), so that a
//! broken parameter set is never handed to the kernel.
template<typename TKey>
inline bool ComputeParamsBuilder<TKey>::scheduleCudaTask(const char* renderOpName, void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream)) &&
{
    if (!isValid())
        return false;
    auto computeParams = build();
    // build() may have latched m_buildError; re-check before scheduling.
    if (!isValid())
        return false;
    omni::graph::image::unstable::scheduleCudaTask(m_gpu, m_rp, m_deviceIndex, computeParams, renderOpName, computeCuda);
    return true;
}
//! Builds the params, lets the caller validate them, then schedules the CUDA task.
//! Returns false without scheduling when the builder is invalid (including a build()
//! error such as an inconsistent device index) or when validateCb rejects the params.
template <typename TKey>
inline bool ComputeParamsBuilder<TKey>::scheduleCudaTask(
    const char* renderOpName,
    void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream),
    bool (*validateCb)(ComputeParams<TKey> const& params)) &&
{
    if (!isValid())
        return false;
    auto computeParams = build();
    // build() may have latched m_buildError; re-check before user validation and scheduling.
    if (!isValid())
        return false;
    if (validateCb && validateCb(computeParams))
    {
        omni::graph::image::unstable::scheduleCudaTask(
            m_gpu, m_rp, m_deviceIndex, computeParams, renderOpName, computeCuda);
        return true;
    }
    return false;
}
} // namespace unstable
} // namespace image
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/IVariable.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
/**
 * Object that contains a value that is local to a graph, available from anywhere in the graph
 */
template <>
class omni::core::Generated<omni::graph::core::IVariable_abi> : public omni::graph::core::IVariable_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::IVariable")
    /**
     * Returns the name of the variable object. The name is derived by
     * removing any variable specific prefixes from the underlying attribute.
     *
     * @return The name of the variable.
     */
    const char* getName() noexcept;
    /**
     * Returns the full path to the variable's underlying attribute
     *
     * @return The full usd path of the variable
     */
    const char* getSourcePath() noexcept;
    /**
     * Returns the type of the variable
     *
     * @return The type of the variable
     */
    omni::graph::core::Type getType() noexcept;
    /**
     * Returns the category of the variable
     *
     * @return The category of the variable, or an empty string if it is not set.
     */
    const char* getCategory() noexcept;
    /**
     * Sets the category of the variable
     *
     * @param[in] category A string representing the variable category
     */
    void setCategory(const char* category) noexcept;
    /**
     * Gets the display name of the variable. By default the display name is the same
     * as the variable name.
     *
     * @return The display name of the variable, or an empty string if it is not set.
     */
    const char* getDisplayName() noexcept;
    /**
     * Set the display name of the variable.
     *
     * @param[in] displayName A string to set the display name to
     */
    void setDisplayName(const char* displayName) noexcept;
    /**
     * Get the tooltip used for the variable.
     *
     * @return The tooltip of the variable, or an empty string if none is set.
     */
    const char* getTooltip() noexcept;
    /**
     * Set the tooltip used for the variable
     *
     * @param[in] toolTip A description used as a tooltip.
     */
    void setTooltip(const char* toolTip) noexcept;
    /**
     * Get the scope of the variable. The scope determines which graphs can read and write the value.
     *
     * @return The scope of the variable.
     */
    omni::graph::core::eVariableScope getScope() noexcept;
    /**
     * Sets the scope of the variable.
     *
     * @param[in] scope The scope to set on the variable.
     */
    void setScope(omni::graph::core::eVariableScope scope) noexcept;
    /**
     * Returns whether this variable is valid
     *
     * @return True if the variable is valid, false otherwise
     */
    bool isValid() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// Inline trampolines: each generated accessor forwards directly to the matching *_abi
// virtual on the interface; no additional logic is added on this side of the ABI boundary.
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getName() noexcept
{
    return getName_abi();
}
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getSourcePath() noexcept
{
    return getSourcePath_abi();
}
inline omni::graph::core::Type omni::core::Generated<omni::graph::core::IVariable_abi>::getType() noexcept
{
    return getType_abi();
}
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getCategory() noexcept
{
    return getCategory_abi();
}
inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setCategory(const char* category) noexcept
{
    setCategory_abi(category);
}
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getDisplayName() noexcept
{
    return getDisplayName_abi();
}
inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setDisplayName(const char* displayName) noexcept
{
    setDisplayName_abi(displayName);
}
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getTooltip() noexcept
{
    return getTooltip_abi();
}
inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setTooltip(const char* toolTip) noexcept
{
    setTooltip_abi(toolTip);
}
inline omni::graph::core::eVariableScope omni::core::Generated<omni::graph::core::IVariable_abi>::getScope() noexcept
{
    return getScope_abi();
}
inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setScope(omni::graph::core::eVariableScope scope) noexcept
{
    setScope_abi(scope);
}
inline bool omni::core::Generated<omni::graph::core::IVariable_abi>::isValid() noexcept
{
    return isValid_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/core/BundleAttrib.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// ====================================================================================================
/* _____ _ _ _ _ _
| __ \ | \ | | | | | | | |
| | | | ___ | \| | ___ | |_ | | | |___ ___
| | | |/ _ \ | . ` |/ _ \| __| | | | / __|/ _ \
| |__| | (_) | | |\ | (_) | |_ | |__| \__ \ __/
|_____/ \___/ |_| \_|\___/ \__| \____/|___/\___|
This is a temporary interface that can change at any time.
*/
// ====================================================================================================
#include "IDirtyID.h"
#include <omni/graph/core/IBundle.h>
namespace omni
{
namespace graph
{
namespace core
{
class BundlePrim;
class ConstBundlePrim;
// Underlying integral type used to store a BundleAttribSource value.
using BundleAttribSourceType = uint8_t;
/**
 * BundleAttributeSource is used to differentiate between UsdAttributes
 * and UsdRelationships.
 *
 * TODO: Investigate why we can't use eRelationship for this purpose.
 */
enum class BundleAttribSource : BundleAttribSourceType
{
    Attribute,    //!< The bundle attribute represents a UsdAttribute.
    Relationship, //!< The bundle attribute represents a UsdRelationship.
};
/**
* Attribute in bundle primitive.
*
* In contrast to (Const)BundlePrim and (Const)BundlePrims, PrimAttribute uses
* const qualifier to express constness of the attribute.
*
* TODO: Review if const qualifier is appropriate.
*/
class BundleAttrib
{
public:
    /**
     * Backward compatibility alias.
     */
    using SourceType = BundleAttribSourceType;
    using Source = BundleAttribSource;
    BundleAttrib() = default;
    /**
     * Read initialization.
     */
    BundleAttrib(ConstBundlePrim& prim, omni::graph::core::NameToken name) noexcept;
    /**
     * Read-Write initialization.
     */
    BundleAttrib(BundlePrim& prim,
                 omni::graph::core::NameToken name,
                 omni::graph::core::Type type,
                 size_t arrayElementCount,
                 BundleAttribSource source) noexcept;
    // Non-copyable and non-movable: instances are owned and handed out by the bundle prim.
    BundleAttrib(BundleAttrib const&) = delete;
    BundleAttrib(BundleAttrib&&) noexcept = delete;
    BundleAttrib& operator=(BundleAttrib const&) = delete;
    BundleAttrib& operator=(BundleAttrib&&) noexcept = delete;
    /**
     * @return Bundle Primitive where this attribute belongs to.
     */
    ConstBundlePrim* getBundlePrim() const noexcept;
    /**
     * @return Bundle Primitive where this attribute belongs to.
     */
    BundlePrim* getBundlePrim() noexcept;
    /**
     * @return Non const attribute handle of this attribute.
     */
    omni::graph::core::AttributeDataHandle handle() noexcept;
    /**
     * @return Const attribute handle of this attribute.
     */
    omni::graph::core::ConstAttributeDataHandle handle() const noexcept;
    /**
     * @return Name of this attribute.
     */
    omni::graph::core::NameToken name() const noexcept;
    /**
     * @return Type of this attribute.
     */
    omni::graph::core::Type type() const noexcept;
    /**
     * @return Interpolation of this attribute.
     */
    omni::graph::core::NameToken interpolation() const noexcept;
    /**
     * Set interpolation for this attribute.
     *
     * @return True if operation successful, false otherwise.
     */
    bool setInterpolation(omni::graph::core::NameToken interpolation) noexcept;
    /**
     * Clear interpolation information for this attribute.
     */
    void clearInterpolation() noexcept;
    [[deprecated("Dirty ID management has been moved to core. Use IBundleChanges.")]]
    DirtyIDType dirtyID() const noexcept;
    // Deprecated no-op: always returns false (dirty ID management moved to core).
    [[deprecated("Setting DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
    bool setDirtyID(DirtyIDType dirtyID) noexcept
    {
        return false;
    }
    // Deprecated no-op: always returns false (dirty ID management moved to core).
    [[deprecated("Bumping DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
    bool bumpDirtyID() noexcept
    {
        return false;
    }
    /**
     * Set source for this attribute.
     *
     * @return True if successful, false otherwise.
     */
    bool setSource(Source source) noexcept;
    /**
     * Reset source to default value for this attribute.
     */
    void clearSource() noexcept;
    /**
     * @return True if this attribute is an array attribute.
     */
    bool isArray() const noexcept;
    /**
     * @return Size of this attribute. If attribute is not an array, then size is 1.
     */
    size_t size() const noexcept;
    /**
     * Changes size of this attribute.
     */
    void resize(size_t arrayElementCount) noexcept;
    /**
     * Copy attribute contents from another attribute.
     * Destination name is preserved.
     */
    void copyContentsFrom(BundleAttrib const& sourceAttr) noexcept;
    /**
     * @return Internal data as void pointer.
     */
    void* getDataInternal() noexcept;
    /**
     * @return Internal data as void pointer.
     */
    void const* getDataInternal() const noexcept;
    template <typename T>
    T get() const noexcept;
    // NOTE: If this is not an array type attribute, this pointer may not be valid once any prim,
    // even if it's not the prim containing this attribute, has an attribute added or removed,
    // due to how attribute data is stored.
    template <typename T>
    T* getData() noexcept;
    template <typename T>
    T const* getData() const noexcept;
    template <typename T>
    T const* getConstData() const noexcept;
    template <typename T>
    void set(T const& value) noexcept;
    template <typename T>
    void set(T const* values, size_t elementCount) noexcept;
    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     * In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/
    /**
     * @todo First iteration of MPiB didn't use 'eRelationship' type to describe relationships.
     * Thus, strange approach was created to treat attribute, that is a relationship as a "source".
     */
    Source source() const noexcept;
    /**
     * @return true if this attribute is data.
     */
    bool isAttributeData() const noexcept;
    /**
     * @return true if this attribute is relationship.
     */
    bool isRelationshipData() const noexcept;
    /**
     * @deprecated IBundle2 interface does not require prefixing, use getName().
     */
    omni::graph::core::NameToken prefixedName() const noexcept;
private:
    /**
     * Remove attribute and its internal data.
     */
    void clearContents() noexcept;
    omni::graph::core::IConstBundle2* getConstBundlePtr() const noexcept;
    omni::graph::core::IBundle2* getBundlePtr() noexcept;
    ConstBundlePrim* m_bundlePrim{ nullptr };
    // Attribute Definition:
    omni::graph::core::NameToken m_name = omni::fabric::kUninitializedToken;
    // NOTE(review): m_type has no default initializer, unlike the other members — confirm that
    // TypeC default-constructs to a well-defined value before relying on a default-constructed BundleAttrib.
    omni::fabric::TypeC m_type;
    // Attribute Property Cached Values:
    omni::graph::core::NameToken m_interpolation = omni::fabric::kUninitializedToken;
    Source m_source { BundleAttribSource::Attribute };
    friend class ConstBundlePrims;
    friend class BundlePrim;
};
/**
 * Do not use! Backward compatibility alias.
 */
using BundleAttributeInfo = BundleAttrib;
} // namespace core
} // namespace graph
} // namespace omni
#include "BundleAttribImpl.h"
|
omniverse-code/kit/include/omni/graph/core/GpuInteropEntryUserData.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <unordered_map>
#include <carb/graphics/GraphicsTypes.h>
namespace omni
{
namespace usd
{
namespace hydra
{
struct HydraRenderProduct;
} // namespace hydra
} // namespace usd
namespace graph
{
namespace core
{
// Less than ideal, but GpuInteropCudaEntryUserData + GpuInteropRpEntryUserData
// are filled out by RenderGraphScheduler.cpp and passed to the top level GpuInterop
// CudaEntry or RenderProductEntry nodes marking the head of post-processing chain
// for RTX Hydra Renderer
//! Description of a single CUDA resource shared with the GPU interop entry nodes.
struct GpuInteropCudaResourceData
{
void* cudaResource; //!< Opaque CUDA resource pointer; a buffer when isBuffer is true, otherwise a texture
uint32_t width; //!< Width in pixels (presumably element count for buffers -- confirm)
uint32_t height; //!< Height in pixels
uint32_t depthOrArraySize; //!< Depth for 3D resources, or array slice count
uint16_t mipCount; //!< Number of mip levels
carb::graphics::Format format; //!< Pixel format of the resource
bool isBuffer; //!< True if cudaResource is a buffer rather than a texture
uint32_t deviceIndex; //!< Index of the GPU device that owns the resource
};
//! Maps a resource name to its CUDA resource description.
typedef std::unordered_map<std::string, GpuInteropCudaResourceData> GpuInteropCudaResourceMap;
//! Per-frame data handed to the top-level GpuInterop CudaEntry node.
struct GpuInteropCudaEntryUserData
{
void* cudaStream; //!< CUDA stream for this frame's interop work (opaque; presumably a cudaStream_t -- confirm)
double simTime; //!< Simulation time of the frame
double hydraTime; //!< Hydra time of the frame
int64_t frameId; //!< Identifier of the frame
int64_t externalTimeOfSimFrame; //!< External timestamp associated with the simulation frame
GpuInteropCudaResourceMap cudaRsrcMap; //!< Named CUDA resources available to the node
};
// GPU Foundations initialization inside Kit remains problematic; until that is
// resolved we pass the required interfaces through as type-erased pointers.
struct GpuFoundationsInterfaces
{
void* graphics; //!< Graphics interface (type-erased)
void* graphicsMux; //!< Graphics mux interface (type-erased)
void* deviceGroup; //!< Device group (type-erased)
void* renderGraphBuilder; //!< Render graph builder (type-erased)
void* resourceManager; //!< Resource manager (type-erased)
void* resourceManagerContext; //!< Resource manager context (type-erased)
void* renderGraph; //!< Render graph (type-erased)
};
//! Per-frame data handed to the top-level GpuInterop RenderProductEntry node.
struct GpuInteropRpEntryUserData
{
double simTime; //!< Simulation time of the frame
double hydraTime; //!< Hydra time of the frame
GpuFoundationsInterfaces* gpu; //!< GPU foundations interfaces for this frame
omni::usd::hydra::HydraRenderProduct* rp; //!< Hydra render product driving this entry
};
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/OgnWrappers.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#pragma message ("OgnWrappers.h is deprecated - include the specific omni/graph/core/ogn/ file you require")
// This file contains simple interface classes which wrap data in the OGN database for easier use
//
// WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code.
// If you call them directly you may have to modify your code when they change.
//
#include <omni/graph/core/CppWrappers.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/TemplateUtils.h>
#include <omni/graph/core/ogn/Types.h>
#include <omni/graph/core/ogn/StringAttribute.h>
#include <omni/graph/core/ogn/ArrayAttribute.h>
#include <omni/graph/core/ogn/SimpleAttribute.h>
|
omniverse-code/kit/include/omni/graph/core/StringUtils.h | // Copyright (c) 2021-2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// This file contains helpful string utilities that can be implemented entirely as inlines, preventing the
// need for a bunch of tiny little extensions.
#include <carb/logging/Log.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/PreUsdInclude.h>
#include <pxr/base/tf/token.h>
#include <omni/graph/core/PostUsdInclude.h>
#include <memory>
#include <string>
#include <vector>
// snprintf becomes _snprintf on Windows, but we want to use std::snprintf
#ifdef HAVE_SNPRINTF
# undef snprintf
#endif
// The namespace is merely to ensure uniqueness. There's nothing inherently associated with OmniGraph in here
namespace omni {
namespace graph {
namespace core {
// ==============================================================================================================
inline void tokenizeString(const char* input, const std::string& separator, std::vector<pxr::TfToken> & output)
{
std::string remainder = input;
size_t separatorLocation = remainder.find(separator);
while (separatorLocation != std::string::npos)
{
std::string tokenStr = remainder.substr(0, separatorLocation);
output.emplace_back(tokenStr);
remainder = remainder.substr(separatorLocation + separator.size());
separatorLocation = remainder.find(separator);
}
if (!remainder.empty())
{
output.emplace_back(remainder);
}
}
// ==============================================================================================================
// This is like tokenizeString, except returns a vector of strings, not tokens
//! Split `string` on every occurrence of `delimiter`.
//!
//! Empty pieces between consecutive delimiters (and a leading empty piece) are kept; an empty
//! trailing piece is dropped, consistent with tokenizeString's treatment of the remainder.
//!
//! @param string    NUL-terminated string to split (must not be nullptr).
//! @param delimiter Single character to split on.
//! @return The pieces in order. An empty input yields an empty vector.
inline std::vector<std::string> splitString(const char* string, char delimiter)
{
    std::vector<std::string> strings;
    const char* segmentStart = string;
    const char* p = string;
    for (; *p != '\0'; ++p)
    {
        // The previous implementation examined each character only after post-incrementing
        // past it, so a delimiter in the very first position was silently ignored.
        if (*p == delimiter)
        {
            strings.emplace_back(segmentStart, p);
            segmentStart = p + 1;
        }
    }
    // Append the final segment unless the input ended with the delimiter. (The old guard
    // compared against a pointer one past the NUL, emitting a spurious empty string.)
    if (segmentStart != p)
        strings.emplace_back(segmentStart, p);
    return strings;
}
// ==============================================================================================================
// Return a formatted string.
// On error will return an empty string.
template <typename... Args>
std::string formatString(const char* format, Args&&... args)
{
int fmtSize = std::snprintf(nullptr, 0, format, args...) + 1; // Extra space for '\0'
if (fmtSize <= 0)
{
CARB_LOG_ERROR("Error formating string %s", format);
return {};
}
auto size = static_cast<size_t>(fmtSize);
auto buf = std::make_unique<char[]>(size);
std::snprintf(buf.get(), size, format, args...);
return std::string(buf.get(), buf.get() + size - 1); // We don't want the '\0' inside
}
// Early versions of GCC emit a warning when the "format" string handed to std::snprintf
// contains no formatting characters. This zero-argument overload bypasses snprintf
// entirely for that case, which both avoids the warning and skips needless work.
inline std::string formatString(const char* format)
{
    return std::string{ format };
}
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/IConstBundle.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Provide read only access to recursive bundles.
//!
//! NOTE: This wrapper is build-system generated by <i>omni.bind</i>; each method below forwards
//! to the matching *_abi virtual declared on IConstBundle2_abi. Do not edit by hand.
template <>
class omni::core::Generated<omni::graph::core::IConstBundle2_abi> : public omni::graph::core::IConstBundle2_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::core::IConstBundle2")
//! Return true if this bundle is valid, false otherwise.
bool isValid() noexcept;
//! Return the context of this bundle.
omni::graph::core::GraphContextObj getContext() noexcept;
//! Return Handle to this bundle. Invalid handle is returned if this bundle is invalid.
omni::graph::core::ConstBundleHandle getConstHandle() noexcept;
//! Return full path of this bundle.
carb::flatcache::PathC getPath() noexcept;
//! Return name of this bundle
omni::graph::core::NameToken getName() noexcept;
//! Return handle to the parent of this bundle. Invalid handle is returned if bundle has no parent.
omni::graph::core::ConstBundleHandle getConstParentBundle() noexcept;
//! @brief Get the names and types of all attributes in this bundle.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when names and types are `nullptr`. When in this mode, *nameAndTypeCount
//! will be populated with the number of attributes in the bundle.
//!
//! **Get mode** is enabled when names or types is not `nullptr`. Upon entering the function, *nameAndTypeCount
//! stores the number of entries in names and types. In **Get mode** names are not nullptr, names array is populated
//! with attribute names. In **Get mode** types are not nullptr, types array is populated with attribute types.
//!
//! @param names The names of the attributes.
//! @param types The types of the attributes.
//! @param nameAndTypeCount must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getAttributeNamesAndTypes(omni::graph::core::NameToken* const names,
omni::graph::core::Type* const types,
size_t* const nameAndTypeCount) noexcept;
//! @brief Get read only handles to all attributes in this bundle.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when attributes is `nullptr`. When in this mode, *attributeCount
//! will be populated with the number of attributes in the bundle.
//!
//! **Get mode** is enabled when attributes is not `nullptr`. Upon entering the function, *attributeCount
//! stores the number of entries in attributes.
//! In **Get mode** attributes are not nullptr, attributes array is populated with attribute handles in the bundle.
//!
//! @param attributes The buffer to store handles of the attributes in this bundle.
//! @param attributeCount Size of attributes buffer. Must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstAttributes(omni::graph::core::ConstAttributeDataHandle* const attributes,
size_t* const attributeCount) noexcept;
//! @brief Search for read only handles of the attribute in this bundle by using attribute names.
//!
//! @param names The name of the attributes to be searched for.
//! @param nameCount Size of names buffer.
//! @param attributes The buffer to store handles of the attributes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstAttributesByName(const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::ConstAttributeDataHandle* const attributes) noexcept;
//! @brief Get read only handles to all child bundles in this bundle.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when bundles is `nullptr`. When in this mode, *bundleCount
//! will be populated with the number of bundles in the bundle.
//!
//! **Get mode** is enabled when bundles is not `nullptr`. Upon entering the function, *bundleCount
//! stores the number of entries in bundles.
//! In **Get mode** bundles are not nullptr, bundles array is populated with bundle handles in the bundle.
//!
//! @param bundles The buffer to save child bundle handles.
//! @param bundleCount Size of the bundles buffer. Must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstChildBundles(omni::graph::core::ConstBundleHandle* const bundles,
size_t* const bundleCount) noexcept;
//! @brief Get read only handle to child bundle by index.
//!
//! @param bundleIndex Bundle index in range [0, childBundleCount).
//! @param bundle Handle under the index. If bundle index is out of range, then invalid handle is returned.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstChildBundle(size_t bundleIndex,
omni::graph::core::ConstBundleHandle* const bundle) noexcept;
//! @brief Lookup for read only handles to child bundles under specified names.
//!
//! For children that are not found invalid handles are returned.
//!
//! @param names The names of the child bundles in this bundle.
//! @param nameCount The number of child bundles to be searched.
//! @param foundBundles Output handles to the found bundles.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstChildBundlesByName(const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::ConstBundleHandle* const foundBundles) noexcept;
//! Return Const Bundle Handle to Metadata Storage
omni::graph::core::ConstBundleHandle getConstMetadataStorage() noexcept;
//! @brief Get the names and types of all bundle metadata fields in this bundle.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when fieldNames and fieldTypes are `nullptr`. When in this mode, *fieldCount
//! will be populated with the number of metadata fields in this bundle.
//!
//! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function,
//! *fieldCount stores the number of entries in fieldNames and @p fieldTypes.
//!
//! In **Get mode** fieldNames are not `nullptr`, fieldNames array is populated with field names.
//! In **Get mode** fieldTypes are not `nullptr`, fieldTypes array is populated with field types.
//!
//! @param fieldNames Output field names in this bundle.
//! @param fieldTypes Output field types in this bundle.
//! @param fieldCount must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getBundleMetadataNamesAndTypes(omni::graph::core::NameToken* const fieldNames,
omni::graph::core::Type* const fieldTypes,
size_t* const fieldCount) noexcept;
//! @brief Search for field handles in this bundle by using field names.
//!
//!@param fieldNames Name of bundle metadata fields to be searched for.
//!@param fieldCount Size of fieldNames and bundleMetadata arrays.
//!@param bundleMetadata Handle to metadata fields in this bundle.
//!@return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstBundleMetadataByName(const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::ConstAttributeDataHandle* const bundleMetadata) noexcept;
//! @brief Get the names and types of all attribute metadata fields in the attribute.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when fieldNames and @p fieldTypes are `nullptr`. When in this mode, *fieldCount
//! will be populated with the number of metadata fields in the attribute.
//!
//! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function,
//! *fieldCount stores the number of entries in fieldNames and fieldTypes.
//!
//! In **Get mode** fieldNames are not `nullptr`, fieldNames array is populated with field names.
//! In **Get mode** fieldTypes are not `nullptr`, fieldTypes array is populated with field types.
//!
//! @param attribute Name of the attribute.
//! @param fieldNames Output field names in the attribute.
//! @param fieldTypes Output field types in the attribute.
//! @param fieldCount must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getAttributeMetadataNamesAndTypes(omni::graph::core::NameToken attribute,
omni::graph::core::NameToken* const fieldNames,
omni::graph::core::Type* const fieldTypes,
size_t* const fieldCount) noexcept;
//! @brief Search for read only field handles in the attribute by using field names.
//!
//! @param attribute The name of the attribute.
//! @param fieldNames The names of attribute metadata fields to be searched for.
//! @param fieldCount Size of fieldNames and attributeMetadata arrays.
//! @param attributeMetadata Handles to attribute metadata fields in the attribute.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstAttributeMetadataByName(
omni::graph::core::NameToken attribute,
const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::ConstAttributeDataHandle* const attributeMetadata) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
//! @copydoc omni::graph::core::IConstBundle2_abi::isValid_abi
inline bool omni::core::Generated<omni::graph::core::IConstBundle2_abi>::isValid() noexcept
{
return isValid_abi();
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getContext_abi
inline omni::graph::core::GraphContextObj omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getContext() noexcept
{
return getContext_abi();
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstHandle_abi
inline omni::graph::core::ConstBundleHandle omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstHandle() noexcept
{
return getConstHandle_abi();
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getPath_abi
inline carb::flatcache::PathC omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getPath() noexcept
{
return getPath_abi();
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getName_abi
inline omni::graph::core::NameToken omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getName() noexcept
{
return getName_abi();
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstParentBundle_abi
inline omni::graph::core::ConstBundleHandle omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstParentBundle() noexcept
{
return getConstParentBundle_abi();
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getAttributeNamesAndTypes_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getAttributeNamesAndTypes(
omni::graph::core::NameToken* const names, omni::graph::core::Type* const types, size_t* const nameAndTypeCount) noexcept
{
return getAttributeNamesAndTypes_abi(names, types, nameAndTypeCount);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstAttributes_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributes(
omni::graph::core::ConstAttributeDataHandle* const attributes, size_t* const attributeCount) noexcept
{
return getConstAttributes_abi(attributes, attributeCount);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstAttributesByName_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributesByName(
const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::ConstAttributeDataHandle* const attributes) noexcept
{
return getConstAttributesByName_abi(names, nameCount, attributes);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstChildBundles_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundles(
omni::graph::core::ConstBundleHandle* const bundles, size_t* const bundleCount) noexcept
{
return getConstChildBundles_abi(bundles, bundleCount);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstChildBundle_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundle(
size_t bundleIndex, omni::graph::core::ConstBundleHandle* const bundle) noexcept
{
return getConstChildBundle_abi(bundleIndex, bundle);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstChildBundlesByName_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundlesByName(
const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::ConstBundleHandle* const foundBundles) noexcept
{
return getConstChildBundlesByName_abi(names, nameCount, foundBundles);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstMetadataStorage_abi
inline omni::graph::core::ConstBundleHandle omni::core::Generated<
omni::graph::core::IConstBundle2_abi>::getConstMetadataStorage() noexcept
{
return getConstMetadataStorage_abi();
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getBundleMetadataNamesAndTypes_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getBundleMetadataNamesAndTypes(
omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept
{
return getBundleMetadataNamesAndTypes_abi(fieldNames, fieldTypes, fieldCount);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstBundleMetadataByName_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstBundleMetadataByName(
const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::ConstAttributeDataHandle* const bundleMetadata) noexcept
{
return getConstBundleMetadataByName_abi(fieldNames, fieldCount, bundleMetadata);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getAttributeMetadataNamesAndTypes_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getAttributeMetadataNamesAndTypes(
omni::graph::core::NameToken attribute,
omni::graph::core::NameToken* const fieldNames,
omni::graph::core::Type* const fieldTypes,
size_t* const fieldCount) noexcept
{
return getAttributeMetadataNamesAndTypes_abi(attribute, fieldNames, fieldTypes, fieldCount);
}
//! @copydoc omni::graph::core::IConstBundle2_abi::getConstAttributeMetadataByName_abi
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributeMetadataByName(
omni::graph::core::NameToken attribute,
const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::ConstAttributeDataHandle* const attributeMetadata) noexcept
{
return getConstAttributeMetadataByName_abi(attribute, fieldNames, fieldCount, attributeMetadata);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/core/ISchedulingHints2.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/core/IObject.h>
#include <omni/core/Omni.h>
#include <omni/inspect/IInspector.h>
#include <omni/graph/core/ISchedulingHints.h>
namespace omni
{
namespace graph
{
namespace core
{
//! The purity of the node implementation. For some context, a "pure" node is
//! one whose initialize, compute, and release methods are entirely deterministic,
//! i.e. they will always produce the same output attribute values for a given set
//! of input attribute values, and do not access, rely on, or otherwise mutate data
//! external to the node's scope
enum class ePurityStatus
{
//! Node is assumed to not be pure (first enumerator, underlying value 0)
eImpure,
//! Node can be considered pure if explicitly specified by the node author
ePure
};
//! Declare the ISchedulingHints2 interface definition
OMNI_DECLARE_INTERFACE(ISchedulingHints2);
//! Interface extension for ISchedulingHints that adds a new "pure" hint
//!
//! See ePurityStatus above for the exact meaning of the flag exposed by the getter/setter below.
class ISchedulingHints2_abi
: public omni::core::Inherits<ISchedulingHints, OMNI_TYPE_ID("omni.graph.core.ISchedulingHints2")>
{
protected:
/**
* Get the flag describing the node's purity state.
*
* @returns Value of the PurityStatus flag.
*/
virtual ePurityStatus getPurityStatus_abi() noexcept = 0;
/**
* Set the flag describing the node's purity status.
*
* @param[in] newPurityStatus New value of the PurityStatus flag.
*/
virtual void setPurityStatus_abi(ePurityStatus newPurityStatus) noexcept = 0;
};
} // namespace core
} // namespace graph
} // namespace omni
#include "ISchedulingHints2.gen.h"
//! @cond Doxygen_Suppress
//!
//! API part of the scheduling hints 2 interface
//! @copydoc omni::graph::core::ISchedulingHints2_abi
OMNI_DEFINE_INTERFACE_API(omni::graph::core::ISchedulingHints2)
//! @endcond
{
public:
// Inline wrappers that forward to the protected *_abi virtuals declared on ISchedulingHints2_abi.
//! @copydoc omni::graph::core::ISchedulingHints2::getPurityStatus_abi
inline omni::graph::core::ePurityStatus getPurityStatus() noexcept
{
return getPurityStatus_abi();
}
//! @copydoc omni::graph::core::ISchedulingHints2::setPurityStatus_abi
inline void setPurityStatus(omni::graph::core::ePurityStatus newPurityStatus) noexcept
{
setPurityStatus_abi(newPurityStatus);
}
};
|
omniverse-code/kit/include/omni/graph/core/PyISchedulingHints2.gen.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
//! Binds the omni::graph::core::ePurityStatus enum into the given Python module
//! and returns the py::enum_ binding object so callers can extend it.
auto bindePurityStatus(py::module& m)
{
py::enum_<omni::graph::core::ePurityStatus> e(
m, "ePurityStatus", R"OMNI_BIND_RAW_(The purity of the node implementation. For some context, a "pure" node is
one whose initialize, compute, and release methods are entirely deterministic,
i.e. they will always produce the same output attribute values for a given set
of input attribute values, and do not access, rely on, or otherwise mutate data
external to the node's scope)OMNI_BIND_RAW_");
e.value("E_IMPURE", omni::graph::core::ePurityStatus::eImpure,
R"OMNI_BIND_RAW_(Node is assumed to not be pure)OMNI_BIND_RAW_");
e.value("E_PURE", omni::graph::core::ePurityStatus::ePure,
R"OMNI_BIND_RAW_(Node can be considered pure if explicitly specified by the node author)OMNI_BIND_RAW_");
return e;
}
//! Binds ISchedulingHints2 (and its generated parent wrapper) into the given Python
//! module and returns the bound class object.
auto bindISchedulingHints2(py::module& m)
{
// hack around pybind11 issues with C++17
// - https://github.com/pybind/pybind11/issues/2234
// - https://github.com/pybind/pybind11/issues/2666
// - https://github.com/pybind/pybind11/issues/2856
// The generated wrapper type is registered first so the concrete binding below can derive from it.
py::class_<omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>,
omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>>,
omni::core::Api<omni::graph::core::ISchedulingHints_abi>>
clsParent(m, "_ISchedulingHints2");
py::class_<omni::graph::core::ISchedulingHints2, omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>,
omni::python::detail::PyObjectPtr<omni::graph::core::ISchedulingHints2>,
omni::core::Api<omni::graph::core::ISchedulingHints_abi>>
cls(m, "ISchedulingHints2",
R"OMNI_BIND_RAW_(Interface extension for ISchedulingHints that adds a new "pure" hint)OMNI_BIND_RAW_");
// Construct by casting from an existing IObject; raises if the object does not implement the interface.
cls.def(py::init(
[](const omni::core::ObjectPtr<omni::core::IObject>& obj)
{
auto tmp = omni::core::cast<omni::graph::core::ISchedulingHints2>(obj.get());
if (!tmp)
{
throw std::runtime_error("invalid type conversion");
}
return tmp;
}));
// Default-construct via the type factory; raises if no implementation is registered.
cls.def(py::init(
[]()
{
auto tmp = omni::core::createType<omni::graph::core::ISchedulingHints2>();
if (!tmp)
{
throw std::runtime_error("unable to create omni::graph::core::ISchedulingHints2 instantiation");
}
return tmp;
}));
// Expose the purity flag as a read/write Python property backed by the C++ getter/setter.
cls.def_property("purity_status", &omni::graph::core::ISchedulingHints2::getPurityStatus,
&omni::graph::core::ISchedulingHints2::setPurityStatus);
return omni::python::PyBind<omni::graph::core::ISchedulingHints2>::bind(cls);
}
|
omniverse-code/kit/include/omni/graph/core/IBundle.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Provide read write access to recursive bundles.
//!
template <>
class omni::core::Generated<omni::graph::core::IBundle2_abi> : public omni::graph::core::IBundle2_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::core::IBundle2")
//! Return handle to this bundle. Invalid handle is returned if this bundle is invalid.
omni::graph::core::BundleHandle getHandle() noexcept;
//! Return parent of this bundle, or invalid handle if there is no parent.
omni::graph::core::BundleHandle getParentBundle() noexcept;
//! @brief Get read-write handles to all attributes in this bundle.
//!
//! @copydetails IConstBundle2_abi::getConstAttributes_abi
omni::core::Result getAttributes(omni::graph::core::AttributeDataHandle* const attributes,
size_t* const attributeCount) noexcept;
//! @brief Searches for read-write handles of the attribute in this bundle by using attribute names.
//!
//! @copydetails IConstBundle2_abi::getConstAttributesByName_abi
omni::core::Result getAttributesByName(const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::AttributeDataHandle* const attributes) noexcept;
//! @brief Get read write handles to all child bundles in this bundle.
//!
//! @copydetails IConstBundle2_abi::getConstChildBundles_abi
omni::core::Result getChildBundles(omni::graph::core::BundleHandle* const bundles, size_t* const bundleCount) noexcept;
//! @brief Get read write handle to child bundle by index.
//!
//! @copydetails IConstBundle2_abi::getConstChildBundle_abi
omni::core::Result getChildBundle(size_t bundleIndex, omni::graph::core::BundleHandle* const bundle) noexcept;
//! @brief Lookup for read write handles to child bundles under specified names.
//!
//! @copydetails IConstBundle2_abi::getConstChildBundlesByName_abi
omni::core::Result getChildBundlesByName(const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::BundleHandle* const foundBundles) noexcept;
//! @brief Create new attributes by copying existing.
//!
//! Source attribute handles' data and metadata are copied. If a handle is invalid,
//! then its source is ignored.
//! Created attributes are owned by this bundle.
//!
//! @param newNames The names for the new attributes, if `nullptr` then names are taken from the source attributes.
//! @param sourceAttributes Handles to attributes whose data type is to be copied.
//! @param attributeCount Number of attributes to be copied.
//! @param overwrite An option to overwrite existing attributes.
//! @param copiedAttributes Output handles to the newly copied attributes. Can be `nullptr` if no output is
//! required.
//! @param copiedCount Number of successfully copied attributes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result copyAttributes(const omni::graph::core::NameToken* const newNames,
const omni::graph::core::ConstAttributeDataHandle* const sourceAttributes,
size_t attributeCount,
bool overwrite,
omni::graph::core::AttributeDataHandle* const copiedAttributes,
size_t* const copiedCount) noexcept;
//! @brief Create attributes based on provided names and types.
//!
//! Created attributes are owned by this bundle.
//!
//! @param names The names of the attributes.
//! @param types The types of the attributes.
//! @param elementCount Number of elements in the array, can be `nullptr` if attribute is not an array.
//! @param attributeCount Number of attributes to be created.
//! @param createdAttributes Output handles to the newly created attributes. Can be nullptr if no output is
//! required.
//! @param createdCount Number of successfully created attributes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result createAttributes(const omni::graph::core::NameToken* const names,
const omni::graph::core::Type* const types,
const size_t* const elementCount,
size_t attributeCount,
omni::graph::core::AttributeDataHandle* const createdAttributes,
size_t* const createdCount) noexcept;
//! @brief Use attribute handles as pattern to create new attributes.
//!
//! The name and type for new attributes are taken from pattern attributes, data and metadata is not copied.
//! If pattern handle is invalid, then attribute creation is skipped.
//! Created attributes are owned by this bundle.
//!
//! @param patternAttributes Attributes whose name and type is to be used to create new attributes.
//! @param patternCount Number of attributes to be created.
//! @param createdAttributes Output handles to the newly created attributes. Can be nullptr if no output is
//! required.
//! @param createdCount Number of successfully created attributes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result createAttributesLike(const omni::graph::core::ConstAttributeDataHandle* const patternAttributes,
size_t patternCount,
omni::graph::core::AttributeDataHandle* const createdAttributes,
size_t* const createdCount) noexcept;
//! @brief Create immediate child bundles under specified names in this bundle.
//!
//! Only immediate children are created. This method does not work recursively.
//! If name token is invalid, then child bundle creation is skipped.
//! Created bundles are owned by this bundle.
//!
//! @param names New children names in this bundle.
//! @param nameCount Number of bundles to be created.
//! @param createdBundles Output handles to the newly created bundles. Can be nullptr if no output is required.
//! @param createdCount Number of successfully created child bundles.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result createChildBundles(const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::BundleHandle* const createdBundles,
size_t* const createdCount) noexcept;
//! <b>Feature not implemented yet.</b>
//!
//! @brief Add a set of attributes to this bundle as links.
//!
//! Added attributes are links to other attributes that are part of another bundle.
//! If target handle is invalid, then linking is skipped.
//! The links are owned by this bundle, but targets of the links are not.
//! Removing links from this bundle does not destroy the data links point to.
//!
//! @param linkNames The names for new links.
//! @param targetAttributes Handles to attributes whose data is to be added.
//! @param attributeCount Number of attributes to be added.
//! @param linkedAttributes Output handles to linked attributes. Can be nullptr if no output is required.
//! @param linkedCount Number of attributes successfully linked.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result linkAttributes(const omni::graph::core::NameToken* const linkNames,
const omni::graph::core::ConstAttributeDataHandle* const targetAttributes,
size_t attributeCount,
omni::graph::core::AttributeDataHandle* const linkedAttributes,
size_t* const linkedCount) noexcept;
//! @brief Copy bundle data and metadata from the source bundle to this bundle.
//!
//! If source handle is invalid, then operation is skipped.
//!
//! @param sourceBundle Handle to bundle whose data is to be copied.
//! @param overwrite An option to overwrite existing content of the bundle.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result copyBundle(const omni::graph::core::ConstBundleHandle& sourceBundle, bool overwrite) noexcept;
//! @brief Create new child bundles by copying existing.
//!
//! Source bundle handles' data and metadata are copied. If a handle is invalid,
//! then its source is ignored.
//! Created bundles are owned by this bundle.
//!
//! @param newNames Names for new children, if `nullptr` then names are taken from the source bundles.
//! @param sourceBundles Handles to bundles whose data is to be copied.
//! @param bundleCount Number of bundles to be copied.
//! @param overwrite An option to overwrite existing child bundles.
//! @param copiedBundles Output handles to the newly copied bundles. Can be `nullptr` if no output is required.
//! @param copiedCount Number of successfully copied child bundles.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result copyChildBundles(const omni::graph::core::NameToken* const newNames,
const omni::graph::core::ConstBundleHandle* const sourceBundles,
size_t bundleCount,
bool overwrite,
omni::graph::core::BundleHandle* const copiedBundles,
size_t* const copiedCount) noexcept;
//! <b>Feature not implemented yet.</b>
//!
//! @brief Link content from the source bundle to this bundle.
//!
//! If source handle is invalid, then operation is skipped.
//!
//! @param sourceBundle Handle to bundle whose data is to be linked.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result linkBundle(const omni::graph::core::ConstBundleHandle* const sourceBundle) noexcept;
//! @brief Add a set of bundles as children to this bundle as links.
//!
//! Created bundles are links to other bundles that are part of another bundle.
//! If target handle is invalid, then operation is skipped.
//! The links are owned by this bundle, but targets of the links are not.
//! Removing links from this bundle does not destroy the targets data.
//!
//! @param linkNames Names for new links.
//! @param targetBundles Handles to bundles whose data is to be added.
//! @param bundleCount Number of bundles to be added.
//! @param linkedBundles Handles to linked bundles. Can be nullptr if no output is required.
//! @param linkedCount Number of child bundles successfully linked.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result linkChildBundles(const omni::graph::core::NameToken* const linkNames,
const omni::graph::core::ConstBundleHandle* const targetBundles,
size_t bundleCount,
omni::graph::core::BundleHandle* const linkedBundles,
size_t* const linkedCount) noexcept;
//! @brief Remove attributes based on provided handles.
//!
//! Lookup the attribute handles and if they are part of this bundle then remove attributes' data and
//! metadata. Attribute handles that are not part of this bundle are ignored.
//!
//! @param attributes Handles to attributes whose data is to be removed
//! @param attributeCount Number of attributes to be removed.
//! @param removedCount Number of attributes successfully removed.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result removeAttributes(const omni::graph::core::ConstAttributeDataHandle* const attributes,
size_t attributeCount,
size_t* const removedCount) noexcept;
//! @brief Remove attributes based on provided names.
//!
//! Lookup the attribute names and if they are part of this bundle then remove attributes' data and
//! metadata. Attribute names that are not part of this bundle are ignored.
//!
//! @param names The names of the attributes whose data is to be removed.
//! @param nameCount Number of attributes to be removed.
//! @param removedCount Number of attributes successfully removed.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result removeAttributesByName(const omni::graph::core::NameToken* const names,
size_t nameCount,
size_t* const removedCount) noexcept;
//! @brief Remove child bundles based on provided handles.
//!
//! Lookup the bundle handles and if they are children of the bundle then remove them and their metadata.
//! Bundle handles that are not children of this bundle are ignored.
//! Only empty child bundles can be removed.
//!
//! @param childHandles Handles to bundles to be removed.
//! @param childCount Number of child bundles to be removed.
//! @param removedCount Number of child bundles successfully removed.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result removeChildBundles(const omni::graph::core::ConstBundleHandle* const childHandles,
size_t childCount,
size_t* const removedCount) noexcept;
//! @brief Remove child bundles based on provided names.
//!
//! Lookup the bundle names and if they are children of the bundle then remove them and their metadata.
//! Bundle names that are not children of this bundle are ignored.
//! Only empty child bundles can be removed.
//!
//! @param names The names of the child bundles to be removed.
//! @param nameCount Number of child bundles to be removed.
//! @param removedCount Number of child bundles successfully removed.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result removeChildBundlesByName(const omni::graph::core::NameToken* const names,
size_t nameCount,
size_t* const removedCount) noexcept;
//! @brief Return the bundle handle to this bundle's internal metadata storage.
omni::graph::core::BundleHandle getMetadataStorage() noexcept;
//! @brief Search for bundle metadata fields based on provided names.
//!
//! Invalid attribute handles are returned for not existing names.
//!
//! @param fieldNames Bundle metadata field names to be searched for.
//! @param fieldCount Size of fieldNames and bundleMetadata arrays.
//! @param bundleMetadata Handles to bundle metadata fields in this bundle.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getBundleMetadataByName(const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::AttributeDataHandle* const bundleMetadata) noexcept;
//! @brief Create bundle metadata fields in this bundle.
//!
//! @param fieldNames Names of new bundle metadata fields.
//! @param fieldTypes Types of new bundle metadata fields.
//! @param elementCount Number of elements in the array, can be `nullptr` if field is not an array.
//! @param fieldCount Size of fieldNames and fieldTypes arrays.
//! @param bundleMetadata Handles to the newly created bundle metadata fields. Can be `nullptr` if no output is
//! required.
//! @param createdCount Number of bundle metadata fields successfully created.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result createBundleMetadata(const omni::graph::core::NameToken* const fieldNames,
const omni::graph::core::Type* const fieldTypes,
const size_t* const elementCount,
size_t fieldCount,
omni::graph::core::AttributeDataHandle* const bundleMetadata,
size_t* const createdCount) noexcept;
//! @brief Remove bundle metadata based on provided field names.
//!
//! @param fieldNames Names of the bundle metadata fields whose data is to be removed.
//! @param fieldCount Number of the bundle metadata fields to be removed.
//! @param removedCount Number of bundle metadata fields successfully removed.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result removeBundleMetadata(const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
size_t* const removedCount) noexcept;
//! @brief Search for read write field handles in the attribute by using field names.
//!
//! @copydetails IConstBundle2_abi::getConstAttributeMetadataByName_abi
omni::core::Result getAttributeMetadataByName(omni::graph::core::NameToken attribute,
const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::AttributeDataHandle* const attributeMetadata) noexcept;
//! @brief Create attribute metadata fields.
//!
//! @param attribute Name of the attribute.
//! @param fieldNames Names of new attribute metadata fields.
//! @param fieldTypes Types of new attribute metadata fields.
//! @param elementCount Number of elements in the array, can be `nullptr` if field is not an array.
//! @param fieldCount Size of fieldNames and fieldTypes arrays.
//! @param attributeMetadata Handles to the newly created attribute metadata. Can be `nullptr` if no output is
//! required.
//! @param removedCount Number of attribute metadata fields successfully created (the parameter name does not match its meaning; it reports the created count).
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result createAttributeMetadata(omni::graph::core::NameToken attribute,
const omni::graph::core::NameToken* const fieldNames,
const omni::graph::core::Type* const fieldTypes,
const size_t* const elementCount,
size_t fieldCount,
omni::graph::core::AttributeDataHandle* const attributeMetadata,
size_t* const removedCount) noexcept;
//! @brief Remove attribute metadata fields.
//!
//! @param attribute Name of the attribute.
//! @param fieldNames Names of the attribute metadata fields to be removed.
//! @param fieldCount Size of fieldNames array.
//! @param removedCount Number of attribute metadata fields successfully removed.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result removeAttributeMetadata(omni::graph::core::NameToken attribute,
const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
size_t* const removedCount) noexcept;
//! @brief Remove all attributes, child bundles and metadata from this bundle, but keep the bundle itself.
//!
//! @param bundleMetadata Clears bundle metadata in this bundle.
//! @param attributes Clears attributes in this bundle.
//! @param childBundles Clears child bundles in this bundle.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result clearContents(bool bundleMetadata, bool attributes, bool childBundles) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// The definitions below are one-line trampolines: each omni::core::Generated<IBundle2_abi>
// member forwards its arguments unchanged to the matching raw *_abi virtual function.
// The full behavioral contracts are documented on the corresponding declarations above.

// -- Handle and content accessors --

inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getHandle() noexcept
{
    return getHandle_abi();
}
inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getParentBundle() noexcept
{
    return getParentBundle_abi();
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributes(
    omni::graph::core::AttributeDataHandle* const attributes, size_t* const attributeCount) noexcept
{
    return getAttributes_abi(attributes, attributeCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributesByName(
    const omni::graph::core::NameToken* const names,
    size_t nameCount,
    omni::graph::core::AttributeDataHandle* const attributes) noexcept
{
    return getAttributesByName_abi(names, nameCount, attributes);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundles(
    omni::graph::core::BundleHandle* const bundles, size_t* const bundleCount) noexcept
{
    return getChildBundles_abi(bundles, bundleCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundle(
    size_t bundleIndex, omni::graph::core::BundleHandle* const bundle) noexcept
{
    return getChildBundle_abi(bundleIndex, bundle);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundlesByName(
    const omni::graph::core::NameToken* const names,
    size_t nameCount,
    omni::graph::core::BundleHandle* const foundBundles) noexcept
{
    return getChildBundlesByName_abi(names, nameCount, foundBundles);
}

// -- Attribute/bundle creation, copying and linking --

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::copyAttributes(
    const omni::graph::core::NameToken* const newNames,
    const omni::graph::core::ConstAttributeDataHandle* const sourceAttributes,
    size_t attributeCount,
    bool overwrite,
    omni::graph::core::AttributeDataHandle* const copiedAttributes,
    size_t* const copiedCount) noexcept
{
    return copyAttributes_abi(newNames, sourceAttributes, attributeCount, overwrite, copiedAttributes, copiedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributes(
    const omni::graph::core::NameToken* const names,
    const omni::graph::core::Type* const types,
    const size_t* const elementCount,
    size_t attributeCount,
    omni::graph::core::AttributeDataHandle* const createdAttributes,
    size_t* const createdCount) noexcept
{
    return createAttributes_abi(names, types, elementCount, attributeCount, createdAttributes, createdCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributesLike(
    const omni::graph::core::ConstAttributeDataHandle* const patternAttributes,
    size_t patternCount,
    omni::graph::core::AttributeDataHandle* const createdAttributes,
    size_t* const createdCount) noexcept
{
    return createAttributesLike_abi(patternAttributes, patternCount, createdAttributes, createdCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createChildBundles(
    const omni::graph::core::NameToken* const names,
    size_t nameCount,
    omni::graph::core::BundleHandle* const createdBundles,
    size_t* const createdCount) noexcept
{
    return createChildBundles_abi(names, nameCount, createdBundles, createdCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkAttributes(
    const omni::graph::core::NameToken* const linkNames,
    const omni::graph::core::ConstAttributeDataHandle* const targetAttributes,
    size_t attributeCount,
    omni::graph::core::AttributeDataHandle* const linkedAttributes,
    size_t* const linkedCount) noexcept
{
    return linkAttributes_abi(linkNames, targetAttributes, attributeCount, linkedAttributes, linkedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::copyBundle(
    const omni::graph::core::ConstBundleHandle& sourceBundle, bool overwrite) noexcept
{
    return copyBundle_abi(sourceBundle, overwrite);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::copyChildBundles(
    const omni::graph::core::NameToken* const newNames,
    const omni::graph::core::ConstBundleHandle* const sourceBundles,
    size_t bundleCount,
    bool overwrite,
    omni::graph::core::BundleHandle* const copiedBundles,
    size_t* const copiedCount) noexcept
{
    return copyChildBundles_abi(newNames, sourceBundles, bundleCount, overwrite, copiedBundles, copiedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkBundle(
    const omni::graph::core::ConstBundleHandle* const sourceBundle) noexcept
{
    return linkBundle_abi(sourceBundle);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkChildBundles(
    const omni::graph::core::NameToken* const linkNames,
    const omni::graph::core::ConstBundleHandle* const targetBundles,
    size_t bundleCount,
    omni::graph::core::BundleHandle* const linkedBundles,
    size_t* const linkedCount) noexcept
{
    return linkChildBundles_abi(linkNames, targetBundles, bundleCount, linkedBundles, linkedCount);
}

// -- Removal of attributes and child bundles --

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributes(
    const omni::graph::core::ConstAttributeDataHandle* const attributes,
    size_t attributeCount,
    size_t* const removedCount) noexcept
{
    return removeAttributes_abi(attributes, attributeCount, removedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributesByName(
    const omni::graph::core::NameToken* const names, size_t nameCount, size_t* const removedCount) noexcept
{
    return removeAttributesByName_abi(names, nameCount, removedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeChildBundles(
    const omni::graph::core::ConstBundleHandle* const childHandles, size_t childCount, size_t* const removedCount) noexcept
{
    return removeChildBundles_abi(childHandles, childCount, removedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeChildBundlesByName(
    const omni::graph::core::NameToken* const names, size_t nameCount, size_t* const removedCount) noexcept
{
    return removeChildBundlesByName_abi(names, nameCount, removedCount);
}

// -- Bundle and attribute metadata management --

inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getMetadataStorage() noexcept
{
    return getMetadataStorage_abi();
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getBundleMetadataByName(
    const omni::graph::core::NameToken* const fieldNames,
    size_t fieldCount,
    omni::graph::core::AttributeDataHandle* const bundleMetadata) noexcept
{
    return getBundleMetadataByName_abi(fieldNames, fieldCount, bundleMetadata);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createBundleMetadata(
    const omni::graph::core::NameToken* const fieldNames,
    const omni::graph::core::Type* const fieldTypes,
    const size_t* const elementCount,
    size_t fieldCount,
    omni::graph::core::AttributeDataHandle* const bundleMetadata,
    size_t* const createdCount) noexcept
{
    return createBundleMetadata_abi(fieldNames, fieldTypes, elementCount, fieldCount, bundleMetadata, createdCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeBundleMetadata(
    const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, size_t* const removedCount) noexcept
{
    return removeBundleMetadata_abi(fieldNames, fieldCount, removedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributeMetadataByName(
    omni::graph::core::NameToken attribute,
    const omni::graph::core::NameToken* const fieldNames,
    size_t fieldCount,
    omni::graph::core::AttributeDataHandle* const attributeMetadata) noexcept
{
    return getAttributeMetadataByName_abi(attribute, fieldNames, fieldCount, attributeMetadata);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributeMetadata(
    omni::graph::core::NameToken attribute,
    const omni::graph::core::NameToken* const fieldNames,
    const omni::graph::core::Type* const fieldTypes,
    const size_t* const elementCount,
    size_t fieldCount,
    omni::graph::core::AttributeDataHandle* const attributeMetadata,
    size_t* const removedCount) noexcept // NOTE: despite its name, this out-parameter receives the created count
{
    return createAttributeMetadata_abi(
        attribute, fieldNames, fieldTypes, elementCount, fieldCount, attributeMetadata, removedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributeMetadata(
    omni::graph::core::NameToken attribute,
    const omni::graph::core::NameToken* const fieldNames,
    size_t fieldCount,
    size_t* const removedCount) noexcept
{
    return removeAttributeMetadata_abi(attribute, fieldNames, fieldCount, removedCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::clearContents(bool bundleMetadata,
                                                                                                bool attributes,
                                                                                                bool childBundles) noexcept
{
    return clearContents_abi(bundleMetadata, attributes, childBundles);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/core/IDataModel.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "Handle.h"
#include <carb/Interface.h>
namespace omni
{
namespace graph
{
namespace core
{
/**
 * Interface to the underlying data access for OmniGraph
 */
struct IDataModel
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::IDataModel", 1, 1);

    // This shouldn't exist...all data model changes should be implemented via data model interface
    // until this is done, we expose a way to protect edits to data model...like ones in PrimCommon

    /**
     * Enter a scoped read or write lock on the data model.
     *
     * @param[in] writer Selects the lock flavor (presumably exclusive when true, shared when
     *                   false -- TODO confirm against the plugin implementation).
     * @return Allocate and return new scoped lock for read or write. The pointer is opaque and
     *         must be released by passing it to exitEditScope().
     */
    void* (CARB_ABI* enterEditScope)(bool writer);

    /**
     * Release a lock previously obtained from enterEditScope().
     *
     * @param[in] scope Free scoped lock
     */
    void (CARB_ABI* exitEditScope)(void* scope);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IDataModel, exitEditScope, 1)
//! Scoping object to enter and exit editing mode for the DataModel (RAII guard).
//!
//! Acquires the DataModel edit lock on construction and releases it on destruction,
//! so the lock cannot be leaked on early return or exception.
class DataModelEditScope
{
public:
    //! Constructor to enter the edit scope, optionally with write mode enabled
    DataModelEditScope(bool write)
    {
        static const IDataModel& iDataModel = *carb::getCachedInterface<IDataModel>();
        m_scope = iDataModel.enterEditScope(write);
    }

    //! Destructor that exits the DataModel edit scope
    ~DataModelEditScope()
    {
        static const IDataModel& iDataModel = *carb::getCachedInterface<IDataModel>();
        iDataModel.exitEditScope(m_scope);
    }

    // Non-copyable: the guard uniquely owns the opaque scope pointer, and a copy
    // would call exitEditScope() twice on the same lock.
    DataModelEditScope(const DataModelEditScope&) = delete;
    DataModelEditScope& operator=(const DataModelEditScope&) = delete;

private:
    void* m_scope{nullptr}; // opaque lock handle returned by enterEditScope()
};
}
}
}
|
omniverse-code/kit/include/omni/graph/core/TemplateUtils.h | // Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <utility>
#include <type_traits>
// ======================================================================
// Implementation of the C++20 feature to detect whether a type is a bounded array (e.g. int[2], float[3]...)
// Primary template: any type that is not an array of known extent reports false.
template <class T>
struct is_bounded_array : std::integral_constant<bool, false>
{
};

// Partial specialization matching arrays of a known bound T[N]; mirrors C++20's
// std::is_bounded_array. Note that unbounded arrays (T[]) fall through to the
// primary template and report false.
template <class T, std::size_t N>
struct is_bounded_array<T[N]> : std::integral_constant<bool, true>
{
};
// ======================================================================
// When we move to C++17 we can replace and_ with std::conjunction
// Recursively applies std::conditional to all of the template arguments.
// Base case: an empty condition pack is vacuously true.
template <typename... Conds>
struct and_ : std::true_type
{
};
// Recursive case: if the head condition holds, the result is the conjunction of the
// tail; otherwise recursion stops immediately at std::false_type (so later conditions
// are never instantiated, matching std::conjunction's short-circuit behavior).
template <typename Cond, typename... Conds>
struct and_<Cond, Conds...> : std::conditional<Cond::value, and_<Conds...>, std::false_type>::type
{
};
// ======================================================================
// When we move to C++17 we can replace "fold" with C++ fold expression
// Ex: args && ...
// Recursively applies provided functor to all of the template arguments.
// Ex: fold(std::logical_and<>(), args...)
// Base case: a single remaining value terminates the recursion; the functor is unused.
template <class Func, class Last>
auto fold(Func&&, Last&& last)
{
    return std::forward<Last>(last);
}

// Recursive case: combine the head element with the (right-)fold of the tail.
// Ex: fold(std::logical_and<>(), args...) evaluates as args0 && (args1 && (...)).
template <class Func, class Head, class... Tail>
auto fold(Func&& combine, Head&& head, Tail&&... tail)
{
    return combine(std::forward<Head>(head), fold(combine, std::forward<Tail>(tail)...));
}
// ======================================================================
// Removes const& qualifier on a type. Note this is the trait itself, not the resulting
// type: use remove_const_ref<T>::type (analogous to C++20's std::remove_cvref).
template <typename T>
using remove_const_ref = std::remove_const<typename std::remove_reference<T>::type>;
// ======================================================================
// Check to see if a list of types are all of the named type; true_type/false_type
// via the short-circuiting and_ conjunction defined above.
template <typename MembersAreThisType, typename... MemberType>
using areTypeT = and_<std::is_same<MembersAreThisType, MemberType>...>;
// ======================================================================
// Templatized version of void (equivalent to C++17's std::void_t); maps any
// well-formed pack of types to void, enabling SFINAE detection below.
template <typename... Ts>
using void_t = void;
// ======================================================================
// This set of templates is used to define a metaprogram "is_detected" that derives from
// std::true_type if the declared templated function exists and std::false_type if not
// (for use in compile-time overload selection, described below).
// NOTE(review): both "detail" and the "is_detected" alias below live in the *global*
// namespace, so they can collide with other libraries; left as-is since callers
// depend on the current names -- confirm before moving into a project namespace.
namespace detail
{
// Matches a call with any type, any templated type, and a variable length list of any types.
// There has to be a typename as the first parameter because another template can't be one.
// Using the void_t<> type defined above allows this parameter to be used for SFINAE selection.
template <typename, template <typename...> class, typename...>
struct is_detected : std::false_type
{
};
// This specialization of detail::is_detected triggers only when the Operation can be instantiated
// with the Arguments. For method checks the "has_X" templates above will be legal types when the
// class mentioned as the first member of "Arguments" implements the method "X". This in turn will
// make void_t<Operation<Arguments...>>> a legal type. In those situations this specialization will
// succeed and is_detected<> will be a std::true_type.
template <template <class...> class Operation, typename... Arguments>
struct is_detected<void_t<Operation<Arguments...>>, Operation, Arguments...> : std::true_type
{
};
}
// This is used only to hide the implementation detail of using the void_t<> template argument to
// guide the SFINAE substitution which detects method overrides. That way the templates below can
// use this more natural pattern:
//     is_detected<has_X, NodeTypeClass>()
// instead of this:
//     detail::is_detected<void_t<>, has_X, NodeTypeClass>()
template <template <class...> class Operation, typename... Arguments>
using is_detected = ::detail::is_detected<void_t<>, Operation, Arguments...>;
|
omniverse-code/kit/include/omni/graph/core/PyIConstBundle.gen.h | // Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
//! Creates the Python bindings for omni::graph::core::IConstBundle2 in module @p m.
//!
//! Registers the generated ABI wrapper first as the hidden base class "_IConstBundle2",
//! then the public "IConstBundle2" class on top of it, and returns the bound class.
auto bindIConstBundle2(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    py::class_<omni::core::Generated<omni::graph::core::IConstBundle2_abi>,
               omni::core::ObjectPtr<omni::core::Generated<omni::graph::core::IConstBundle2_abi>>, omni::core::IObject>
        clsParent(m, "_IConstBundle2");

    py::class_<omni::graph::core::IConstBundle2, omni::core::Generated<omni::graph::core::IConstBundle2_abi>,
               omni::core::ObjectPtr<omni::graph::core::IConstBundle2>, omni::core::IObject>
        cls(m, "IConstBundle2", R"OMNI_BIND_RAW_(Provide read only access to recursive bundles.)OMNI_BIND_RAW_");

    // Conversion constructor: cast an existing IObject to IConstBundle2; raises on type mismatch.
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::IConstBundle2>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));

    // Default constructor: instantiate a fresh IConstBundle2 through the omni type factory.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::IConstBundle2>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::IConstBundle2 instantiation");
            }
            return tmp;
        }));

    // Expose isValid() as the read-only Python property "valid".
    cls.def_property_readonly("valid", &omni::graph::core::IConstBundle2::isValid);

    return omni::python::PyBind<omni::graph::core::IConstBundle2>::bind(cls);
}
|
omniverse-code/kit/include/omni/graph/core/ISchedulingHints.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/core/IObject.h>
#include <omni/core/Omni.h>
#include <omni/inspect/IInspector.h>
namespace omni
{
namespace graph
{
namespace core
{
//! How does the node access the data described by the enum eAccessLocation
//! (queried/set via ISchedulingHints::getDataAccess/setDataAccess below)
enum class eAccessType
{
    //! There is no access to data of the associated type
    eNone,
    //! There is only read access to data of the associated type
    eRead,
    //! There is only write access to data of the associated type
    eWrite,
    //! There is both read and write access to data of the associated type
    eReadWrite
};
//! What type of non-attribute data does this node access
enum class eAccessLocation
{
    //! Accesses the USD stage data
    eUsd,
    //! Accesses data that is not part of the node or node type
    eGlobal,
    //! Accesses data that is shared by every instance of a particular node type
    eStatic,
    //! Accesses information on the topology of the graph to which the node belongs
    eTopology
};
//! How thread safe is the node during evaluation
//! (queried/set via ISchedulingHints::getThreadSafety/setThreadSafety below)
enum class eThreadSafety
{
    //! Nodes can be evaluated in multiple threads safely
    eSafe,
    //! Nodes cannot be evaluated in multiple threads safely
    eUnsafe,
    //! The thread safety status of the node type is unknown
    eUnknown
};
//! How the node is allowed to be computed
enum class eComputeRule
{
    //! Nodes are computed according to the default evaluator rules
    eDefault,
    //! The evaluator may skip computing this node until explicitly requested with INode::requestCompute
    eOnRequest
};
//! Declare the ISchedulingHints interface definition
OMNI_DECLARE_INTERFACE(ISchedulingHints);
//! Interface to the list of scheduling hints that can be applied to a node type
//!
//! The hints are descriptive, not prescriptive: they tell the evaluator how a node's
//! compute behaves (thread safety, data access, compute rule) so it can be scheduled
//! appropriately. Callers use the generated ISchedulingHints wrapper rather than
//! these protected _abi methods directly.
class ISchedulingHints_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.ISchedulingHints")>
{
protected:
    /**
     * Get the threadSafety status (i.e. can be run in parallel with other nodes)
     *
     * @returns Is the node compute threadsafe?
     */
    virtual eThreadSafety getThreadSafety_abi() noexcept = 0;
    /**
     * Set the flag indicating if a node is threadsafe or not.
     *
     * @param[in] newThreadSafety New value of the threadsafe flag
     */
    virtual void setThreadSafety_abi(eThreadSafety newThreadSafety) noexcept = 0;
    /**
     * Get the type of access the node has for a given data type
     *
     * @param[in] dataType Type of data for which access type is being modified
     * @returns Value of the access type flag
     */
    virtual eAccessType getDataAccess_abi(eAccessLocation dataType) noexcept = 0;
    /**
     * Set the flag describing how a node accesses particular data in its compute _abi (defaults to no access).
     * Setting any of these flags will, in most cases, automatically mark the node as "not threadsafe".
     * One current exception to this is allowing a node to be both threadsafe and a writer to USD, since
     * such behavior can be achieved if delayed writebacks (e.g. "registerForUSDWriteBack") are utilized
     * in the node's compute method.
     *
     * @param[in] dataType Type of data for which access type is being modified
     * @param[in] newAccessType New value of the access type flag
     */
    virtual void setDataAccess_abi(eAccessLocation dataType, eAccessType newAccessType) noexcept = 0;
    /**
     * Get the flag describing the compute rule which may be followed by the evaluator.
     *
     * @returns Value of the ComputeRule flag
     */
    virtual eComputeRule getComputeRule_abi() noexcept = 0;
    /**
     * Set the flag describing the compute rule which may be followed by the evaluator.
     *
     * @param[in] newComputeRule New value of the ComputeRule flag
     */
    virtual void setComputeRule_abi(eComputeRule newComputeRule) noexcept = 0;
    /**
     * Runs the inspector on the scheduling hints.
     *
     * @param[in] inspector The inspector class
     * @return true if the inspection ran successfully, false if the inspection type is not supported
     */
    virtual bool inspect_abi(omni::inspect::IInspector* inspector) noexcept = 0;
};
} // namespace core
} // namespace graph
} // namespace omni
#include "ISchedulingHints.gen.h"
|
omniverse-code/kit/include/omni/graph/core/CppWrappers.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "TemplateUtils.h"
#include <carb/Defines.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/CudaUtils.h>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>
#include <gsl/span>
namespace omni
{
namespace graph
{
namespace core
{
// The templates for extracting data do not like double pointers so use this type for assigning strings
using CString = char*;
// NOTE: This file is a work in progress, for assessing possible interfaces, not yet for use.
// -Wall will warn about these inline functions not being used
#if defined(__GNUC__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wunused-function"
#endif
using Path = omni::fabric::Path;
using Token = omni::fabric::Token;
// ----------------------------------------------------------------------------
// Accessors for accessing attributes by name
// Looks up a single read-only attribute handle on a bundle by its C-ABI token name.
static ConstAttributeDataHandle getAttributeR(GraphContextObj const& contextObj,
                                              ConstBundleHandle bundleHandle,
                                              fabric::TokenC name)
{
    // Express the single-name lookup through the batched ABI entry point (count == 1).
    ConstAttributeDataHandle handle;
    contextObj.iBundle->getAttributesByNameR(&handle, contextObj, bundleHandle, &name, 1);
    return handle;
}
// Looks up a single read-only attribute handle on a bundle by its Token name.
static ConstAttributeDataHandle getAttributeR(GraphContextObj const& contextObj,
                                              ConstBundleHandle bundleHandle,
                                              Token const& name)
{
    // Convert the Token wrapper to the C-ABI NameToken and defer to the TokenC overload.
    NameToken abiName = name;
    return getAttributeR(contextObj, bundleHandle, abiName);
}
// Fetches read-only handles for several named attributes of a bundle with one ABI call.
// The i-th element of the returned tuple corresponds to the i-th entry of 'names'.
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getAttributesR(const GraphContextObj& contextObj,
                                    ConstBundleHandle& primHandle,
                                    std::tuple<NameTypes...> names)
{
    // Check that the output and name tuples have the same length and that their
    // element types are the ones the ABI call below expects.
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<NameToken, NameTypes...>::value || areTypeT<Token, NameTypes...>::value,
                  "Attribute names must have type NameToken or Token");
    static_assert(areTypeT<ConstAttributeDataHandle, Types...>::value, "Outputs must have type ConstAttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): treating a homogeneous std::tuple as a contiguous array relies on
    // implementation-defined tuple layout -- confirm this holds on supported toolchains.
    std::tuple<Types...>* outTuplePtr = &out;
    ConstAttributeDataHandle* outPtr = reinterpret_cast<ConstAttributeDataHandle*>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &names;
    NameToken* namesPtr = reinterpret_cast<NameToken*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iBundle->getAttributesByNameR(outPtr, contextObj, primHandle, namesPtr, inCount);
    return out;
}
// Looks up a single writable attribute handle on a bundle by its Token name.
static AttributeDataHandle getAttributeW(const GraphContextObj& contextObj, BundleHandle& primHandle, const Token& name)
{
    // Convert to the C-ABI token type, then issue a batched lookup of one attribute.
    NameToken abiName = name;
    AttributeDataHandle handle;
    contextObj.iBundle->getAttributesByNameW(&handle, contextObj, primHandle, &abiName, 1);
    return handle;
}
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getAttributesW(const GraphContextObj& contextObj,
BundleHandle& primHandle,
std::tuple<NameTypes...> names)
{
// Check that size(out)==len(names) and that every element of names has type size_t
const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
static_assert(inCount == outCount, "Input and output tuples must be of same length");
static_assert(areTypeT<NameToken, NameTypes...>::value || areTypeT<Token, NameTypes...>::value,
"Attribute names must have type NameToken or Token");
static_assert(areTypeT<AttributeDataHandle, Types...>::value, "Outputs must have type AttributeDataHandle");
std::tuple<Types...> out;
// Cast to C-ABI compatible types
std::tuple<Types...>* outTuplePtr = &out;
AttributeDataHandle* outPtr = reinterpret_cast<AttributeDataHandle*>(outTuplePtr);
std::tuple<NameTypes...>* inTuplePtr = &names;
HandleInt* namesPtr = reinterpret_cast<HandleInt*>(inTuplePtr);
// Call C-ABI version of method
contextObj.iBundle->getAttributesByNameW(outPtr, contextObj, primHandle, namesPtr, inCount);
return out;
}
// ----------------------------------------------------------------------------
// Accessors for accessing attributes by AttributeDataHandle
// Specialization for the read-only data of a single attribute. Get a single attribute handle from getAttributeR
// to pass in as the second parameter.
// auto constHandle = getAttributeR(contextObj, node, Token("myAttribute"))
// // Note that the template parameter for the special case is the data type, not a pointer type
// const auto dataPtr = getDataR<float>(contextObj, constHandle);
// Returns a typed read-only pointer to the data of a single attribute.
template <typename T>
const T* getDataR(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle)
{
    // Single-handle fetch routed through the batched ABI call (count == 1).
    const T* data{ nullptr };
    contextObj.iAttributeData->getDataR(reinterpret_cast<const void**>(&data), contextObj, &attrHandle, 1);
    return data;
}
// Get the read-only data for an arbitrary list of attributes. Get a tuple attribute handle from getAttributesR
// to pass in as the second parameter.
// auto constHandles = getAttributesR<ConstAttributeDataHandle, ConstAttributeDataHandle>(contextObj, node,
// std::make_tuple(Token("a"), Token("b")))
// const float* a_value{nullptr};
// const float* b_value{nullptr};
// // Note that the template parameters for the general case are pointers to the data type
// std::tie(a_value, b_value) = getDataR<float*, float*>(contextObj, constHandles);
// Returns typed read-only data pointers for a tuple of attribute handles in one ABI call.
template <typename... Types, typename... HandleTypes>
std::tuple<Types...> getDataR(const GraphContextObj& contextObj, std::tuple<HandleTypes...> handles)
{
    // Check that the output and handle tuples have the same length and that the
    // handles have the type the ABI call below expects.
    const size_t inCount = std::tuple_size<std::tuple<HandleTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<ConstAttributeDataHandle, HandleTypes...>::value,
                  "Attribute handles must have type ConstAttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): reinterpreting a homogeneous std::tuple as a flat array relies on
    // implementation-defined tuple layout -- confirm on supported toolchains.
    std::tuple<Types...>* outTuplePtr = &out;
    const void** outPtr = reinterpret_cast<const void**>(outTuplePtr);
    std::tuple<HandleTypes...>* inTuplePtr = &handles;
    ConstAttributeDataHandle* handlesPtr = reinterpret_cast<ConstAttributeDataHandle*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iAttributeData->getDataR(outPtr, contextObj, handlesPtr, inCount);
    return out;
}
// Specialization for the read-only GPU data of a single attribute. Get a single attribute handle from getAttributeR
// to pass in as the second parameter.
// auto handle = getAttributeR(contextObj, node, Token("myAttribute"))
// // Note that the template parameter for the special case is the data type, not a pointer type
// const auto gpuData = getDataRGPU<float>(contextObj, handle);
// If you wish to get CPU pointers to GPU data for arrays then use the alternative version:
// const auto gpuData = getDataRGPUOnCPU<float>(contextObj, handle);
// Returns a typed read-only GPU data pointer for one attribute; 'where' selects
// whether pointers-to-pointers live on the CPU or the GPU.
template <typename T>
const T* getDataRGpuAt(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle, omni::fabric::PtrToPtrKind where)
{
    // Single-handle fetch routed through the batched ABI call (count == 1).
    const T* data{ nullptr };
    contextObj.iAttributeData->getDataRGpuAt(reinterpret_cast<const void**>(&data), contextObj, &attrHandle, 1, where);
    return data;
}
// Syntactic sugar
// GPU pointers addressed from the GPU (the common case for device kernels).
template <typename T>
const T* getDataRGPU(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle)
{
    return getDataRGpuAt<T>(contextObj, attrHandle, omni::fabric::PtrToPtrKind::eGpuPtrToGpuPtr);
}
// GPU pointers addressed from the CPU (useful when arrays of GPU data are walked on host).
template <typename T>
const T* getDataRGPUOnCPU(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle)
{
    return getDataRGpuAt<T>(contextObj, attrHandle, omni::fabric::PtrToPtrKind::eCpuPtrToGpuPtr);
}
// Get the read-only GPU data for an arbitrary list of attributes. Get a tuple attribute handle from getAttributesR
// to pass in as the second parameter.
// auto handles = getAttributesR<ConstAttributeDataHandle, ConstAttributeDataHandle>(contextObj, node,
// std::make_tuple(Token("a"), Token("b")))
// const float* a_gpu_value{nullptr};
// const float* b_gpu_value{nullptr};
// // Note that the template parameters for the general case are pointers to the data type
// std::tie(a_gpu_value, b_gpu_value) = getDataRGPU<float*, float*>(contextObj, handles);
// If you wish to get CPU pointers to GPU data for arrays then use the alternative version:
// std::tie(a_gpu_value, b_gpu_value) = getDataRGPUOnCPU<float*, float*>(contextObj, handles);
// Returns typed read-only GPU data pointers for a tuple of attribute handles in one ABI call.
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataRGpuAt(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles, omni::fabric::PtrToPtrKind where)
{
    // Check that the output and handle tuples have the same length and that the
    // handles have the type the ABI call below expects.
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<ConstAttributeDataHandle, NameTypes...>::value,
                  "Attribute handles must have type ConstAttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): relies on implementation-defined std::tuple layout (see getDataR).
    std::tuple<Types...>* outTuplePtr = &out;
    const void** outPtr = reinterpret_cast<const void**>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &handles;
    ConstAttributeDataHandle* handlesPtr = reinterpret_cast<ConstAttributeDataHandle*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iAttributeData->getDataRGpuAt(outPtr, contextObj, handlesPtr, inCount, where);
    return out;
}
// Syntactic sugar
// GPU pointers addressed from the GPU (the common case for device kernels).
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataRGPU(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles)
{
    return getDataRGpuAt<Types...>(contextObj, handles, omni::fabric::PtrToPtrKind::eGpuPtrToGpuPtr);
}
// GPU pointers addressed from the CPU (useful when arrays of GPU data are walked on host).
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataRGPUOnCPU(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles)
{
    return getDataRGpuAt<Types...>(contextObj, handles, omni::fabric::PtrToPtrKind::eCpuPtrToGpuPtr);
}
// Specialization for the writable data of a single attribute. Get a single attribute handle from getAttributeW
// to pass in as the second parameter.
// auto handle = getAttributeW(contextObj, node, Token("myAttribute"))
// // Note that the template parameter for the special case is the data type, not a pointer type
// auto dataPtr = getDataW<float>(contextObj, handle);
// Returns a typed writable pointer to the data of a single attribute.
template <typename T>
T* getDataW(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
{
    // Single-handle fetch routed through the batched ABI call (count == 1).
    T* data{ nullptr };
    contextObj.iAttributeData->getDataW(reinterpret_cast<void**>(&data), contextObj, &attrHandle, 1);
    return data;
}
// Get the writable data for an arbitrary list of attributes. Get a tuple attribute handle from getAttributesW
// to pass in as the second parameter.
// auto handles = getAttributesW<AttributeDataHandle, AttributeDataHandle>(contextObj, node,
// std::make_tuple(Token("a"), Token("b")))
// float* a_value{nullptr};
// float* b_value{nullptr};
// // Note that the template parameters for the general case are pointers to the data type
// std::tie(a_value, b_value) = getDataW<float*, float*>(contextObj, handles);
// Returns typed writable data pointers for a tuple of attribute handles in one ABI call.
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataW(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles)
{
    // Check that the output and handle tuples have the same length and that the
    // handles have the type the ABI call below expects.
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(
        areTypeT<AttributeDataHandle, NameTypes...>::value, "Attribute handles must have type AttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): relies on implementation-defined std::tuple layout (see getDataR).
    std::tuple<Types...>* outTuplePtr = &out;
    void** outPtr = reinterpret_cast<void**>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &handles;
    AttributeDataHandle* handlesPtr = reinterpret_cast<AttributeDataHandle*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iAttributeData->getDataW(outPtr, contextObj, handlesPtr, inCount);
    return out;
}
// Specialization for the writable GPU data of a single attribute. Get a single attribute handle from getAttributeW
// to pass in as the second parameter.
// auto handle = getAttributeW(contextObj, node, Token("myAttribute"))
// // Note that the template parameter for the special case is the data type, not a pointer type
// auto gpuData = getDataWGPU<float>(contextObj, handle);
// If you wish to get CPU pointers to GPU data for arrays then use the alternative version:
// auto gpuData = getDataWGPUOnCPU<float>(contextObj, handle);
// Returns a typed writable GPU data pointer for one attribute; 'whereGpuPtrs' selects
// whether pointers-to-pointers live on the CPU or the GPU.
template <typename T>
T* getDataWGpuAt(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, omni::fabric::PtrToPtrKind whereGpuPtrs)
{
    // Single-handle fetch routed through the batched ABI call (count == 1).
    T* data{ nullptr };
    contextObj.iAttributeData->getDataWGpuAt(reinterpret_cast<void**>(&data), contextObj, &attrHandle, 1, whereGpuPtrs);
    return data;
}
// Syntactic sugar
// GPU pointers addressed from the GPU (the common case for device kernels).
template <typename T>
T* getDataWGPU(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
{
    return getDataWGpuAt<T>(contextObj, attrHandle, omni::fabric::PtrToPtrKind::eGpuPtrToGpuPtr);
}
// GPU pointers addressed from the CPU (useful when arrays of GPU data are walked on host).
template <typename T>
T* getDataWGPUOnCPU(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
{
    return getDataWGpuAt<T>(contextObj, attrHandle, omni::fabric::PtrToPtrKind::eCpuPtrToGpuPtr);
}
// Get the writable GPU data for an arbitrary list of attributes. Get a tuple attribute handle from getAttributesW
// to pass in as the second parameter.
// auto handles = getAttributesW<AttributeDataHandle, AttributeDataHandle>(contextObj, node,
// std::make_tuple(Token("a"), Token("b")))
// float* a_gpu_value{nullptr};
// float* b_gpu_value{nullptr};
// // Note that the template parameters for the general case are pointers to the data type
// std::tie(a_gpu_value, b_gpu_value) = getDataWGPU<float*, float*>(contextObj, handles);
// If you wish to get CPU pointers to GPU data for arrays then use the alternative version:
// std::tie(a_gpu_value, b_gpu_value) = getDataWGPUOnCPU<float*, float*>(contextObj, handles);
// Returns typed writable GPU data pointers for a tuple of attribute handles in one ABI call.
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataWGpuAt(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles, omni::fabric::PtrToPtrKind whereGpuPtrs)
{
    // Check that the output and handle tuples have the same length and that the
    // handles have the type the ABI call below expects.
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(
        areTypeT<AttributeDataHandle, NameTypes...>::value, "Attribute handles must have type AttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): relies on implementation-defined std::tuple layout (see getDataR).
    std::tuple<Types...>* outTuplePtr = &out;
    void** outPtr = reinterpret_cast<void**>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &handles;
    AttributeDataHandle* handlesPtr = reinterpret_cast<AttributeDataHandle*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iAttributeData->getDataWGpuAt(outPtr, contextObj, handlesPtr, inCount, whereGpuPtrs);
    return out;
}
// Syntactic sugar
// GPU pointers addressed from the GPU (the common case for device kernels).
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataWGPU(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles)
{
    return getDataWGpuAt<Types...>(contextObj, handles, omni::fabric::PtrToPtrKind::eGpuPtrToGpuPtr);
}
// GPU pointers addressed from the CPU (useful when arrays of GPU data are walked on host).
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataWGPUOnCPU(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles)
{
    return getDataWGpuAt<Types...>(contextObj, handles, omni::fabric::PtrToPtrKind::eCpuPtrToGpuPtr);
}
// Returns the element count of a single attribute, given a writable handle.
static size_t getElementCount(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
{
    // Writable handles convert to const handles; reuse the const ABI query (count == 1).
    ConstAttributeDataHandle asConst = attrHandle;
    size_t count{ 0 };
    contextObj.iAttributeData->getElementCount(&count, contextObj, &asConst, 1);
    return count;
}
// Returns the element count of a single attribute, given a read-only handle.
static size_t getElementCount(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle)
{
    // Batched ABI query with a count of one.
    size_t count{ 0 };
    contextObj.iAttributeData->getElementCount(&count, contextObj, &attrHandle, 1);
    return count;
}
// Returns the element counts for a tuple of attribute handles (const or writable)
// in a single ABI call; every output element must be size_t.
template <typename... Types, typename... HandleTypes>
std::tuple<Types...> getElementCount(const GraphContextObj& contextObj, std::tuple<HandleTypes...> handles)
{
    // Check that the output and handle tuples have the same length and that their
    // element types are the ones the ABI call below expects.
    const size_t inCount = std::tuple_size<std::tuple<HandleTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<ConstAttributeDataHandle, HandleTypes...>::value ||
                      areTypeT<AttributeDataHandle, HandleTypes...>::value,
                  "Attribute handles must have type ConstAttributeDataHandle or AttributeDataHandle");
    static_assert(areTypeT<size_t, Types...>::value, "Outputs must have type size_t");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): relies on implementation-defined std::tuple layout, and on writable
    // handles being layout-compatible with ConstAttributeDataHandle -- confirm both.
    std::tuple<Types...>* outTuplePtr = &out;
    size_t* outPtr = reinterpret_cast<size_t*>(outTuplePtr);
    std::tuple<HandleTypes...>* inTuplePtr = &handles;
    ConstAttributeDataHandle* handlesPtr = reinterpret_cast<ConstAttributeDataHandle*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iAttributeData->getElementCount(outPtr, contextObj, handlesPtr, inCount);
    return out;
}
//-----------------------------------------------------------------------------
static std::vector<ConstAttributeDataHandle> getAttributes(const GraphContextObj& contextObj, ConstBundleHandle bundle)
{
size_t count = contextObj.iBundle->getAttributesCount(contextObj, bundle);
std::vector<ConstAttributeDataHandle> attrsOut(count);
contextObj.iBundle->getAttributesR(attrsOut.data(), contextObj, bundle, count);
return attrsOut;
}
static std::vector<AttributeDataHandle> getAttributes(const GraphContextObj& contextObj, BundleHandle bundle)
{
size_t count = contextObj.iBundle->getAttributesCount(contextObj, bundle);
std::vector<AttributeDataHandle> attrsOut(count);
contextObj.iBundle->getAttributesW(attrsOut.data(), contextObj, bundle, count);
return attrsOut;
}
// Batched read-only lookup of 'count' attributes by Token name; results land in attrsOut.
static void getAttributesByName(ConstAttributeDataHandle* attrsOut,
                                const GraphContextObj& contextObj,
                                ConstBundleHandle prim,
                                const Token* attrNames,
                                size_t count)
{
    // Reinterpret the Token array as the C-ABI NameToken array (layout-compatible here).
    contextObj.iBundle->getAttributesByNameR(
        attrsOut, contextObj, prim, reinterpret_cast<const NameToken*>(attrNames), count);
}
// Batched writable lookup of 'count' attributes by Token name; results land in attrsOut.
static void getAttributesByName(AttributeDataHandle* attrsOut,
                                const GraphContextObj& contextObj,
                                BundleHandle prim,
                                const Token* attrNames,
                                size_t count)
{
    // Reinterpret the Token array as the C-ABI NameToken array (layout-compatible here).
    contextObj.iBundle->getAttributesByNameW(
        attrsOut, contextObj, prim, reinterpret_cast<const NameToken*>(attrNames), count);
}
// ======================================================================
// Context wrappers
// getAttributeR is to be used in conjunction with the single item getDataR specialization to extract read-only data
// from a single attribute. If you have more than one attribute it is best to use getAttributesR, to minimize the calls
// across the ABI boundary. See the getDataR description for more information.
// Looks up a single read-only attribute handle on a node context by Token name.
static ConstAttributeDataHandle getAttributeR(const GraphContextObj& contextObj,
                                              NodeContextHandle node,
                                              const Token& name,
                                              InstanceIndex instanceIndex)
{
    // Convert to the C-ABI token type and issue a batched context lookup of one name.
    NameToken abiName = name;
    ConstAttributeDataHandle handle;
    contextObj.iContext->getAttributesByNameR(&handle, contextObj, node, &abiName, 1, instanceIndex);
    return handle;
}
// getAttributesR is to be used in conjunction with the general version of getDataR to extract read-only data from an
// arbitrary list of attributes. It uses variadic templates to gather all arguments into a single ABI call. See the
// getDataR description for more information. Sample usage:
// std::tie(a, b, c) = getAttributesR(contextObj, node, std::make_tuple(Token("a"), Token("b"), Token("c")), instanceIndex)
// Fetches read-only handles for several named attributes of a node instance with
// one ABI call; the i-th output corresponds to the i-th entry of 'names'.
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getAttributesR(const GraphContextObj& contextObj,
                                    NodeContextHandle node,
                                    std::tuple<NameTypes...> names,
                                    InstanceIndex instanceIndex)
{
    // Check that the output and name tuples have the same length and that their
    // element types are the ones the ABI call below expects.
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<NameToken, NameTypes...>::value || areTypeT<Token, NameTypes...>::value,
                  "Attribute names must have type NameToken or Token");
    static_assert(areTypeT<ConstAttributeDataHandle, Types...>::value, "Outputs must have type ConstAttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): relies on implementation-defined std::tuple layout (see getDataR).
    std::tuple<Types...>* outTuplePtr = &out;
    ConstAttributeDataHandle* outPtr = reinterpret_cast<ConstAttributeDataHandle*>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &names;
    NameToken* namesPtr = reinterpret_cast<NameToken*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iContext->getAttributesByNameR(outPtr, contextObj, node, namesPtr, inCount, instanceIndex);
    return out;
}
// getAttributeW is to be used in conjunction with the single item getDataW specialization to extract writable data
// from a single attribute. If you have more than one attribute it is best to use getAttributesW, to minimize the calls
// across the ABI boundary. See the getDataW description for more information.
// Looks up a single writable attribute handle on a node context by Token name.
static AttributeDataHandle getAttributeW(const GraphContextObj& contextObj,
                                         NodeContextHandle node,
                                         const Token& name,
                                         InstanceIndex instanceIndex)
{
    // Convert to the C-ABI token type and issue a batched context lookup of one name.
    NameToken abiName = name;
    AttributeDataHandle handle;
    contextObj.iContext->getAttributesByNameW(&handle, contextObj, node, &abiName, 1, instanceIndex);
    return handle;
}
// getAttributesW is to be used in conjunction with the general version of getDataW to extract read-only data from an
// arbitrary list of attributes. It uses variadic templates to gather all arguments into a single ABI call. See the
// getDataW description for more information. Sample usage:
// std::tie(a, b, c) = getAttributesW(contextObj, node, std::make_tuple(Token("a"), Token("b"), Token("c")), instanceIndex)
// Fetches writable handles for several named attributes of a node instance with
// one ABI call; the i-th output corresponds to the i-th entry of 'names'.
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getAttributesW(const GraphContextObj& contextObj,
                                    NodeContextHandle node,
                                    std::tuple<NameTypes...> names,
                                    InstanceIndex instanceIndex)
{
    // Check that the output and name tuples have the same length and that their
    // element types are the ones the ABI call below expects.
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<NameToken, NameTypes...>::value || areTypeT<Token, NameTypes...>::value,
                  "Attribute names must have type NameToken or Token");
    static_assert(areTypeT<AttributeDataHandle, Types...>::value, "Outputs must have type AttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): relies on implementation-defined std::tuple layout (see getDataR).
    std::tuple<Types...>* outTuplePtr = &out;
    AttributeDataHandle* outPtr = reinterpret_cast<AttributeDataHandle*>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &names;
    NameToken* namesPtr = reinterpret_cast<NameToken*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iContext->getAttributesByNameW(outPtr, contextObj, node, namesPtr, inCount, instanceIndex);
    return out;
}
#if defined(__GNUC__)
# pragma GCC diagnostic pop
#endif
}
}
}
|
omniverse-code/kit/include/omni/graph/core/IVariable2.gen.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
/**
* @brief Interface extension for IVariable that adds the ability to set a variable type
*/
// Generated wrapper that exposes the IVariable2 ABI as a public method; the
// out-of-line definition later in this file forwards to setType_abi.
template <>
class omni::core::Generated<omni::graph::core::IVariable2_abi> : public omni::graph::core::IVariable2_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::IVariable2")
    /**
     * Sets the type of the variable.
     *
     * @param[in] type New type for the variable
     *
     * @return True if the type is able to be set, false otherwise
     */
    bool setType(omni::graph::core::Type type) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::core::IVariable2_abi>::setType(omni::graph::core::Type type) noexcept
{
    // Forward directly across the ABI boundary.
    return setType_abi(type);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/core/BundlePrims.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// ====================================================================================================
/* _____ _ _ _ _ _
| __ \ | \ | | | | | | | |
| | | | ___ | \| | ___ | |_ | | | |___ ___
| | | |/ _ \ | . ` |/ _ \| __| | | | / __|/ _ \
| |__| | (_) | | |\ | (_) | |_ | |__| \__ \ __/
|_____/ \___/ |_| \_|\___/ \__| \____/|___/\___|
This is a temporary interface that can change at any time.
*/
// ====================================================================================================
#include "ConstBundlePrims.h"
namespace omni
{
namespace graph
{
namespace core
{
class BundlePrims;
class BundlePrimIterator;
class BundlePrimAttrIterator;
/**
* Collection of read-write attributes in a primitive.
*/
class BundlePrim : public ConstBundlePrim
{
public:
// Mutable iterator over the attribute cache inherited from ConstBundlePrim.
using AttrMapIteratorType = BundleAttributeMap::iterator;
/**
* @return Parent of this bundle prim.
*/
BundlePrims* getBundlePrims() noexcept;
/**
* @return Bundle handle of this primitive.
*/
BundleHandle handle() noexcept;
/**
* Sets path of the primitive.
*/
void setPath(NameToken path) noexcept;
/**
* Sets type of the primitive.
*/
void setType(NameToken type) noexcept;
/**
* @return Cached instance of BundleAttrib if attribute is found successfully, nullptr otherwise.
*/
BundleAttrib* getAttr(NameToken attrName) noexcept;
/**
* @return BundleAttrib if attribute is added successfully, nullptr otherwise.
*/
BundleAttrib* addAttr(NameToken attrName,
Type type,
size_t arrayElementCount = 0,
BundleAttrib::Source source = BundleAttrib::Source::Attribute) noexcept;
/**
* Convenience structure for adding attributes.
*/
struct AddAttrInfo
{
NameToken attrName; // Name of the attribute to create.
Type type; // Data type of the new attribute.
size_t arrayElementCount; // Element count for array types (see addAttr's default of 0).
BundleAttrib::Source source; // Attribute vs. relationship (see BundleAttrib::Source).
};
/**
* Adds a list of attributes to this bundle prim.
*
* @param[in] attrList Vector of all the new attributes to be added to this prim
* @returns True if all (new) attributes were added successfully
*
* @todo Weakness of this interface is that it forces usage of std::vector.
*/
bool addAttrs(std::vector<AddAttrInfo> const& attrList) noexcept;
/**
* Remove attribute with a given name from this primitive.
*/
void removeAttr(NameToken attrName) noexcept;
/**
* Recursively remove all attributes from this primitive.
*/
void clearContents() noexcept;
/**
* Copy contents from another bundle prim.
*/
void copyContentsFrom(ConstBundlePrim& source, bool removeAttrsNotInSource = true) noexcept;
// Intentionally a no-op; kept only so legacy callers still compile.
[[deprecated("Bumping DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
void bumpDirtyID() noexcept {}
// Intentionally a no-op; kept only so legacy callers still compile.
[[deprecated("Setting DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
void setDirtyID(DirtyIDType dirtyID) noexcept {}
/**
* @return Attribute iterator pointing to the first attribute in this bundle.
*/
BundlePrimAttrIterator begin() noexcept;
/**
* @return Attribute iterator pointing to the last attribute in this bundle.
*/
BundlePrimAttrIterator end() noexcept;
/**
* @return Attribute iterator pointing to the first attribute in this bundle.
*/
ConstBundlePrimAttrIterator cbegin() noexcept;
/**
* @return Attribute iterator pointing to the last attribute in this bundle.
*/
ConstBundlePrimAttrIterator cend() noexcept;
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
/**
* Create an attribute that is a relationship type.
*/
BundleAttrib* addRelationship(NameToken name, size_t targetCount) noexcept;
/**
* @deprecated Use getBundlePrims.
*/
BundlePrims* bundlePrims() noexcept;
/**
* @deprecated Do not use!
*/
void copyContentsFrom(ConstBundlePrim const& source, bool removeAttrsNotInSource = true) noexcept;
private:
/**
* Direct initialization with IBundle interface.
*
* ConstBundlePrim and BundlePrim take advantage of polymorphic relationship
* between IConstBundle and IBundle interfaces.
* In order to modify bundles, BundlePrim makes an attempt to cast IConstBundle
* to IBundle interface. When this process is successful then, bundle can be modified.
*
* Only BundlePrims is allowed to create instances of BundlePrim.
*/
BundlePrim(BundlePrims& bundlePrims, omni::core::ObjectPtr<IBundle2> bundle);
/**
* Clear contents of IBundle.
*/
void recursiveClearContents(GraphContextObj const& context,
IBundleFactory* factory,
IBundle2* bundle) noexcept;
/**
* @return Make an attempt to cast IConstBundle interface to IBundle. Returns nullptr if operation failed.
*/
IBundle2* getBundlePtr(IConstBundle2* constBundle) noexcept;
/**
* @return Make an attempt to cast IConstBundle interface to IBundle. Returns nullptr if operation failed.
*/
IBundle2* getBundlePtr() noexcept;
/**
* @return True if primitive is an instance of common attributes.
*/
bool isCommonAttrs() const noexcept
{
// primIndex() is non-const; strip constness and upcast so it can be reused here.
// The common-attributes pseudo-prim is identified by an invalid prim index.
BundlePrimIndex primIndex = static_cast<ConstBundlePrim*>(const_cast<BundlePrim*>(this))->primIndex();
return primIndex == kInvalidBundlePrimIndex;
}
friend class BundlePrimIterator;
friend class BundlePrims;
friend class BundleAttrib;
};
/**
* Collection of read-write primitives in a bundle.
*
* Bundle Primitives is not movable, not copyable. It lifespan is managed by the user.
*/
class BundlePrims : public ConstBundlePrims
{
public:
/**
* Acquire access to a bundle primitives under given handle.
*/
BundlePrims(GraphContextObj const& context, BundleHandle const& bundle);
~BundlePrims() noexcept;
/**
* @return Bundle handle of this primitive.
*/
BundleHandle handle() noexcept;
/**
* @return BundlePrim under given index, or nullptr if prim is not found.
*/
BundlePrim* getPrim(BundlePrimIndex primIndex) noexcept;
/**
* @return BundlePrim allowing access to attributes to this bundle primitives.
*/
BundlePrim& getCommonAttrs() noexcept;
/**
* Add new primitives to this bundle.
*
* @return Number of successfully added primitives.
*/
size_t addPrims(size_t primCountToAdd) noexcept;
/**
* Remove primitive under given index.
*/
bool removePrim(BundlePrimIndex primIndex) noexcept;
/**
* Cleans up this primitive bundle. Remove all primitives and attributes.
*/
void clearContents() noexcept;
// Intentionally a no-op; always reports an invalid dirty ID. Kept for legacy callers.
[[deprecated("Bumping DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
DirtyIDType bumpBundleDirtyID() noexcept
{
return kInvalidDirtyID;
}
/**
* @return Primitive iterator pointing to the first primitive in this bundle.
*/
BundlePrimIterator begin() noexcept;
/**
* @return Primitive iterator pointing to the last primitive in this bundle.
*/
BundlePrimIterator end() noexcept;
/**
* @return Primitive iterator pointing to the first primitive in this bundle.
*/
ConstBundlePrimIterator cbegin() noexcept;
/**
* @return Primitive iterator pointing to the last primitive in this bundle.
*/
ConstBundlePrimIterator cend() noexcept;
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
/**
* @deprecated Don't use! Read attach() description.
*/
BundlePrims();
/**
* @deprecated Use appropriate constructor and heap allocate BundlePrims.
*
* @todo: There is no benefit of using this method. Cache has to be rebuild from scratch
* whenever BundlePrims is attached/detached.
* It would be better to remove default constructor and enforce cache construction
* through constructor with arguments.
*/
void attach(GraphContextObj const& context, BundleHandle const& bundle) noexcept;
/**
* @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
*/
void detach() noexcept;
/**
* @deprecated Do not use! Use removePrim with index. This override introduces ambiguity where int can
* be converted to a pointer.
*
* @todo: Weakness of removePrim design is that it introduces two overrides with following arguments:
* * pointer
* * integer
* This leads to ambiguity during override resolution. Override with a pointer should be avoided
* and removed in the future.
*/
bool removePrim(ConstBundlePrim* prim) noexcept;
/**
* @deprecated Do not use! There is no need for this function to exist.
* Get the primitive and call clearContents().
*/
BundlePrim* getClearedPrim(BundlePrimIndex primIndex) noexcept;
/**
* @deprecated Responsibility to cache primitive's attributes has been moved to BundlePrim.
*/
void ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept;
private:
/**
* @return Returns nullptr if bundle is read only, or IBundle2 instance otherwise.
*/
IBundle2* getBundlePtr() noexcept;
// cached attribute handles
// Handle to the metadata attribute holding the prim-index offset; starts invalid.
AttributeDataHandle m_bundlePrimIndexOffsetAttr{ AttributeDataHandle::invalidValue() };
friend class BundlePrim;
friend class BundleAttrib;
};
/**
* Primitives in Bundle iterator.
*/
// Forward-style iterator over the primitives of a BundlePrims collection.
// Holds a non-owning pointer to the collection plus the current prim index;
// dereferencing resolves the prim lazily via the parent collection.
class BundlePrimIterator
{
public:
BundlePrimIterator(BundlePrims& bundlePrims, BundlePrimIndex primIndex = 0) noexcept;
BundlePrimIterator(BundlePrimIterator const& that) noexcept = default;
BundlePrimIterator& operator=(BundlePrimIterator const& that) noexcept = default;
bool operator==(BundlePrimIterator const& that) const noexcept;
bool operator!=(BundlePrimIterator const& that) const noexcept;
BundlePrim& operator*() noexcept;
BundlePrim* operator->() noexcept;
BundlePrimIterator& operator++() noexcept;
private:
BundlePrims* m_bundlePrims; // Non-owning; must outlive the iterator.
BundlePrimIndex m_primIndex; // Index of the prim this iterator currently designates.
};
/**
* Attributes in Primitive iterator.
*/
// Forward-style iterator over the attributes of a single BundlePrim.
// Wraps the prim's attribute-map iterator; getConst() exposes the read-only view.
class BundlePrimAttrIterator
{
public:
BundlePrimAttrIterator(BundlePrim& bundlePrim, BundlePrim::AttrMapIteratorType attrIter) noexcept;
BundlePrimAttrIterator(BundlePrimAttrIterator const& that) noexcept = default;
BundlePrimAttrIterator& operator=(BundlePrimAttrIterator const& that) noexcept = default;
bool operator==(BundlePrimAttrIterator const& that) const noexcept;
bool operator!=(BundlePrimAttrIterator const& that) const noexcept;
BundleAttrib& operator*() noexcept;
BundleAttrib* operator->() noexcept;
BundlePrimAttrIterator& operator++() noexcept;
BundleAttrib const* getConst() noexcept;
private:
BundlePrim* m_bundlePrim; // Non-owning; must outlive the iterator.
BundlePrim::AttrMapIteratorType m_attrIter; // Position in the prim's attribute cache.
};
} // namespace core
} // namespace graph
} // namespace omni
#include "BundlePrimsImpl.h"
|
omniverse-code/kit/include/omni/graph/core/IVariable.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/core/IObject.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/Handle.h>
namespace omni
{
namespace graph
{
namespace core
{
/**
* Scope in which the variable has been made available.
* Consumed by IVariable_abi::getScope_abi() / setScope_abi() below.
*/
enum class eVariableScope
{
/** Variable is accessible only to its graph */
ePrivate = 0,
/** Variable can be read by other graphs */
eReadOnly = 1,
/** Variable can be read/written by other graphs */
ePublic = 2,
};
//! Declare the IVariable interface definition
OMNI_DECLARE_INTERFACE(IVariable);
//! Data type to use for a reference to an IVariable interface definition
using IVariablePtr = omni::core::ObjectPtr<IVariable>;
/**
* Object that contains a value that is local to a graph, available from anywhere in the graph
*/
class IVariable_abi
: public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.IVariable")>
{
protected:
/**
* Returns the name of the variable object. The name is derived by
* removing any variable specific prefixes from the underlying attribute.
*
* @return The name of the variable.
*/
virtual const char* getName_abi() noexcept = 0;
/**
* Returns the full path to the variables underlying attribute
*
* @return The full usd path of the variable
*/
virtual const char* getSourcePath_abi() noexcept = 0;
/**
* Returns the type of the variable
*
* @note Marked "no_py": this accessor is excluded from the Python bindings.
*
* @return The type of the variable
*/
virtual OMNI_ATTR("no_py") Type getType_abi() noexcept = 0;
/**
* Returns the category of the variable
*
* @return The category of the variable, or an empty string if it is not set.
*/
virtual const char* getCategory_abi() noexcept = 0;
/**
* Sets the category of the variable
*
* @param[in] category A string representing the variable category
*/
virtual void setCategory_abi(OMNI_ATTR("c_str, in, not_null") const char* category) noexcept = 0;
/**
* Gets the display name of the variable. By default the display name is the same
* as the variable name.
*
* @return The display name of the variable, or an empty string if it is not set.
*/
virtual const char* getDisplayName_abi() noexcept = 0;
/**
* Set the display name of the variable.
*
* @param[in] displayName A string to set the display name to
*/
virtual void setDisplayName_abi(OMNI_ATTR("c_str, in, not_null") const char* displayName) noexcept = 0;
/**
* Get the tooltip used for the variable.
*
* @return The tooltip of the variable, or an empty string if none is set.
*/
virtual const char* getTooltip_abi() noexcept = 0;
/**
* Set the tooltip used for the variable
*
* @param[in] toolTip A description used as a tooltip.
*/
virtual void setTooltip_abi(OMNI_ATTR("c_str, in, not_null") const char* toolTip) noexcept = 0;
/**
* Get the scope of the variable. The scope determines which graphs can read and write the value.
*
* @return The scope of the variable.
*/
virtual eVariableScope getScope_abi() noexcept = 0;
/**
* Sets the scope of the variable.
*
* @param[in] scope The scope to set on the variable.
*/
virtual void setScope_abi(eVariableScope scope) noexcept = 0;
/**
* Returns whether this variable is valid
*
* @return True if the variable is valid, false otherwise
*/
virtual bool isValid_abi() noexcept = 0;
};
} // namespace core
} // namespace graph
} // namespace omni
#include "IVariable.gen.h"
//! @cond Doxygen_Suppress
//!
//! API part of the variable interface
//! @copydoc omni::graph::core::IVariable_abi
OMNI_DEFINE_INTERFACE_API(omni::graph::core::IVariable)
//! @endcond
{
public:
/**
* Changes the type of the variable.
*
* @param[in] type The type to change the variable to
* @returns True if the type was successfully changed, False otherwise. Setting the type
* can fail if the backing USD change is on a layer with a weaker opinion.
*/
// NOTE(review): declared inline here but defined elsewhere — presumably in
// IVariable.gen.h (included above); confirm before relocating this declaration.
inline bool setType(omni::graph::core::Type type) noexcept;
};
|
omniverse-code/kit/include/omni/graph/core/PyISchedulingHints.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
// Binds omni::graph::core::eAccessType to Python as "eAccessType".
// (Build-system generated file; prefer regenerating over hand edits.)
auto bindeAccessType(py::module& m)
{
py::enum_<omni::graph::core::eAccessType> e(
m, "eAccessType",
R"OMNI_BIND_RAW_(How does the node access the data described by the enum eAccessLocation)OMNI_BIND_RAW_");
e.value("E_NONE", omni::graph::core::eAccessType::eNone,
R"OMNI_BIND_RAW_(There is no access to data of the associated type)OMNI_BIND_RAW_");
e.value("E_READ", omni::graph::core::eAccessType::eRead,
R"OMNI_BIND_RAW_(There is only read access to data of the associated type)OMNI_BIND_RAW_");
e.value("E_WRITE", omni::graph::core::eAccessType::eWrite,
R"OMNI_BIND_RAW_(There is only write access to data of the associated type)OMNI_BIND_RAW_");
e.value("E_READ_WRITE", omni::graph::core::eAccessType::eReadWrite,
R"OMNI_BIND_RAW_(There is both read and write access to data of the associated type)OMNI_BIND_RAW_");
return e;
}
// Binds omni::graph::core::eAccessLocation to Python as "eAccessLocation".
auto bindeAccessLocation(py::module& m)
{
py::enum_<omni::graph::core::eAccessLocation> e(
m, "eAccessLocation", R"OMNI_BIND_RAW_(What type of non-attribute data does this node access)OMNI_BIND_RAW_");
e.value("E_USD", omni::graph::core::eAccessLocation::eUsd,
R"OMNI_BIND_RAW_(Accesses the USD stage data)OMNI_BIND_RAW_");
e.value("E_GLOBAL", omni::graph::core::eAccessLocation::eGlobal,
R"OMNI_BIND_RAW_(Accesses data that is not part of the node or node type)OMNI_BIND_RAW_");
e.value("E_STATIC", omni::graph::core::eAccessLocation::eStatic,
R"OMNI_BIND_RAW_(Accesses data that is shared by every instance of a particular node type)OMNI_BIND_RAW_");
e.value("E_TOPOLOGY", omni::graph::core::eAccessLocation::eTopology,
R"OMNI_BIND_RAW_(Accesses information on the topology of the graph to which the node belongs)OMNI_BIND_RAW_");
return e;
}
// Binds omni::graph::core::eThreadSafety to Python as "eThreadSafety".
auto bindeThreadSafety(py::module& m)
{
py::enum_<omni::graph::core::eThreadSafety> e(
m, "eThreadSafety", R"OMNI_BIND_RAW_(How thread safe is the node during evaluation)OMNI_BIND_RAW_");
e.value("E_SAFE", omni::graph::core::eThreadSafety::eSafe,
R"OMNI_BIND_RAW_(Nodes can be evaluated in multiple threads safely)OMNI_BIND_RAW_");
e.value("E_UNSAFE", omni::graph::core::eThreadSafety::eUnsafe,
R"OMNI_BIND_RAW_(Nodes cannot be evaluated in multiple threads safely)OMNI_BIND_RAW_");
e.value("E_UNKNOWN", omni::graph::core::eThreadSafety::eUnknown,
R"OMNI_BIND_RAW_(The thread safety status of the node type is unknown)OMNI_BIND_RAW_");
return e;
}
// Binds omni::graph::core::eComputeRule to Python as "eComputeRule".
auto bindeComputeRule(py::module& m)
{
py::enum_<omni::graph::core::eComputeRule> e(
m, "eComputeRule", R"OMNI_BIND_RAW_(How the node is allowed to be computed)OMNI_BIND_RAW_");
e.value("E_DEFAULT", omni::graph::core::eComputeRule::eDefault,
R"OMNI_BIND_RAW_(Nodes are computed according to the default evaluator rules)OMNI_BIND_RAW_");
e.value(
"E_ON_REQUEST", omni::graph::core::eComputeRule::eOnRequest,
R"OMNI_BIND_RAW_(The evaluator may skip computing this node until explicitly requested with INode::requestCompute)OMNI_BIND_RAW_");
return e;
}
// Binds the ISchedulingHints interface class to Python, including the hidden
// "_ISchedulingHints" parent for the generated ABI wrapper, two constructors
// (cast-from-IObject and default-create), properties, and methods.
auto bindISchedulingHints(py::module& m)
{
// hack around pybind11 issues with C++17
// - https://github.com/pybind/pybind11/issues/2234
// - https://github.com/pybind/pybind11/issues/2666
// - https://github.com/pybind/pybind11/issues/2856
py::class_<omni::core::Generated<omni::graph::core::ISchedulingHints_abi>,
omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::ISchedulingHints_abi>>,
omni::core::IObject>
clsParent(m, "_ISchedulingHints");
py::class_<omni::graph::core::ISchedulingHints, omni::core::Generated<omni::graph::core::ISchedulingHints_abi>,
omni::python::detail::PyObjectPtr<omni::graph::core::ISchedulingHints>, omni::core::IObject>
cls(m, "ISchedulingHints",
R"OMNI_BIND_RAW_(Interface to the list of scheduling hints that can be applied to a node type)OMNI_BIND_RAW_");
// Constructor from an existing IObject: performs a checked interface cast.
cls.def(py::init(
[](const omni::core::ObjectPtr<omni::core::IObject>& obj)
{
auto tmp = omni::core::cast<omni::graph::core::ISchedulingHints>(obj.get());
if (!tmp)
{
throw std::runtime_error("invalid type conversion");
}
return tmp;
}));
// Default constructor: instantiates a new ISchedulingHints via the type factory.
cls.def(py::init(
[]()
{
auto tmp = omni::core::createType<omni::graph::core::ISchedulingHints>();
if (!tmp)
{
throw std::runtime_error("unable to create omni::graph::core::ISchedulingHints instantiation");
}
return tmp;
}));
cls.def_property("thread_safety", &omni::graph::core::ISchedulingHints::getThreadSafety,
&omni::graph::core::ISchedulingHints::setThreadSafety);
cls.def_property("compute_rule", &omni::graph::core::ISchedulingHints::getComputeRule,
&omni::graph::core::ISchedulingHints::setComputeRule);
cls.def("get_data_access", &omni::graph::core::ISchedulingHints::getDataAccess,
R"OMNI_BIND_RAW_(Get the type of access the node has for a given data type
@param[in] dataType Type of data for which access type is being modified
@returns Value of the access type flag)OMNI_BIND_RAW_",
py::arg("data_type"));
cls.def("set_data_access", &omni::graph::core::ISchedulingHints::setDataAccess,
R"OMNI_BIND_RAW_(Set the flag describing how a node accesses particular data in its compute _abi (defaults to no access).
Setting any of these flags will, in most cases, automatically mark the node as "not threadsafe".
One current exception to this is allowing a node to be both threadsafe and a writer to USD, since
such behavior can be achieved if delayed writebacks (e.g. "registerForUSDWriteBack") are utilized
in the node's compute method.
@param[in] dataType Type of data for which access type is being modified
@param[in] newAccessType New value of the access type flag)OMNI_BIND_RAW_",
py::arg("data_type"), py::arg("new_access_type"));
cls.def("inspect",
[](omni::graph::core::ISchedulingHints* self, omni::inspect::IInspector* inspector)
{
auto return_value = self->inspect(inspector);
return return_value;
},
R"OMNI_BIND_RAW_(Runs the inspector on the scheduling hints.
@param[in] inspector The inspector class
@return true if the inspection ran successfully, false if the inspection type is not supported)OMNI_BIND_RAW_",
py::arg("inspector"));
return omni::python::PyBind<omni::graph::core::ISchedulingHints>::bind(cls);
}
|
omniverse-code/kit/include/omni/graph/core/ConstBundlePrimsImpl.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "ConstBundlePrims.h"
#include <omni/graph/core/CppWrappers.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/ComputeGraph.h>
#include <algorithm>
namespace omni
{
namespace graph
{
namespace core
{
// ====================================================================================================
//
// Const Bundle Primitive
//
// ====================================================================================================
// Wraps an IConstBundle2 for read-only prim access. Eagerly populates the
// attribute cache and caches the prim-index metadata handle used by primIndex().
inline ConstBundlePrim::ConstBundlePrim(ConstBundlePrims& bundlePrims, omni::core::ObjectPtr<IConstBundle2> bundle)
: m_bundlePrims{ &bundlePrims }, m_bundle{ std::move(bundle) }
{
// Read and cache all non internal attributes.
readAndCacheAttributes();
const detail::AttrDefinition& primIndexDef = detail::getPrimIndexDefinition();
m_primIndexAttr = getConstBundlePtr()->getConstBundleMetadataByName(primIndexDef.token);
}
// Read-only handle of the wrapped bundle.
inline ConstBundleHandle ConstBundlePrim::getConstHandle() noexcept
{
return m_bundle->getConstHandle();
}
inline void ConstBundlePrim::readAndCacheAttributes() noexcept
{
IConstBundle2* bundle = getConstBundlePtr();
GraphContextObj const& context = getConstBundlePrims()->context();
std::vector<ConstAttributeDataHandle> attrHandles(bundle->getAttributeCount());
bundle->getConstAttributes(attrHandles.data(), attrHandles.size());
auto& attrs = getAttributes();
for(ConstAttributeDataHandle& attrHandle : attrHandles)
{
if(!attrHandle.isValid())
continue;
NameToken attrName = context.iAttributeData->getName(context, attrHandle);
attrs.insert(std::make_pair(attrName, std::make_unique<BundleAttrib>(*this, attrName)));
}
}
// Looks up an attribute wrapper by name, creating and caching it on first use.
//
// @param[in] attrName Name of the attribute to look up.
// @return Cached BundleAttrib for the attribute, or nullptr when the bundle
//         holds no attribute with that name.
//
// Fix: the original re-queried the cache and erased a (necessarily absent)
// entry when the bundle lookup failed — dead code, since the function returns
// early on a cache hit and nothing mutates the cache in between. Removed.
inline BundleAttrib const* ConstBundlePrim::getConstAttr(core::NameToken attrName) noexcept
{
    // Fast path: return the wrapper if one was already created.
    auto& attrMap = getAttributes();
    auto it = attrMap.find(attrName);
    if (it != attrMap.end())
    {
        return it->second.get();
    }

    // Not cached: look the attribute up in the underlying bundle.
    IConstBundle2* bundle = getConstBundlePtr();
    ConstAttributeDataHandle attributeHandle = bundle->getConstAttributeByName(attrName);
    if (!attributeHandle.isValid())
    {
        // The attribute does not exist. The cache was already checked above and
        // contains no entry for this name, so there is nothing to evict.
        return nullptr;
    }

    // Create and cache a wrapper for the newly discovered attribute.
    auto newPrimAttribute = new BundleAttrib{ *this, attrName };
    std::unique_ptr<BundleAttrib> primAttributePtr{ newPrimAttribute };
    attrMap.emplace(attrName, std::move(primAttributePtr));
    return newPrimAttribute;
}
// Const entry point; caching mutates internal state, so delegate to the
// non-const overload via const_cast.
inline BundleAttrib const* ConstBundlePrim::getAttr(NameToken attrName) const noexcept
{
return const_cast<ConstBundlePrim*>(this)->getConstAttr(attrName);
}
// Number of attributes currently held in this prim's attribute cache.
inline size_t ConstBundlePrim::attrCount() noexcept
{
return getAttributes().size();
}
// Index of this prim within its parent bundle, read from the cached prim-index
// metadata attribute. Returns kInvalidBundlePrimIndex when the metadata is
// absent (this is how the common-attributes pseudo-prim is identified).
inline BundlePrimIndex ConstBundlePrim::primIndex() noexcept
{
if (m_primIndexAttr.isValid())
{
ConstBundlePrims* bundlePrims = getConstBundlePrims();
return *getDataR<BundlePrimIndex>(bundlePrims->context(), m_primIndexAttr);
}
return kInvalidBundlePrimIndex;
}
// Path token of the primitive. The attribute handle is resolved lazily and
// cached in m_pathAttr; returns kUninitializedToken when the bundle carries
// no path attribute.
inline NameToken ConstBundlePrim::path() noexcept
{
if (!m_pathAttr.isValid())
{
const detail::AttrDefinition& attrDef = detail::getPrimPathDefinition();
m_pathAttr = getConstBundlePtr()->getConstAttributeByName(attrDef.token);
}
NameToken result = omni::fabric::kUninitializedToken;
if (m_pathAttr.isValid())
{
ConstBundlePrims* bundlePrims = getConstBundlePrims();
result = *getDataR<NameToken>(bundlePrims->context(), m_pathAttr);
}
return result;
}
// Const overload; lazy caching mutates state, hence the const_cast delegation.
inline NameToken ConstBundlePrim::path() const noexcept
{
return const_cast<ConstBundlePrim*>(this)->path();
}
// Type token of the primitive. Mirrors path(): the attribute handle is
// resolved lazily into m_typeAttr; returns kUninitializedToken when the
// bundle carries no type attribute.
inline NameToken ConstBundlePrim::type() noexcept
{
if (!m_typeAttr.isValid())
{
const detail::AttrDefinition& attrDef = detail::getPrimTypeDefinition();
m_typeAttr = getConstBundlePtr()->getConstAttributeByName(attrDef.token);
}
NameToken result = omni::fabric::kUninitializedToken;
if (m_typeAttr.isValid())
{
ConstBundlePrims* bundlePrims = getConstBundlePrims();
result = *getDataR<NameToken>(bundlePrims->context(), m_typeAttr);
}
return result;
}
// Const overload; lazy caching mutates state, hence the const_cast delegation.
inline NameToken ConstBundlePrim::type() const noexcept
{
return const_cast<ConstBundlePrim*>(this)->type();
}
// Dirty-ID tracking now lives in core: query the dirty-ID interface from the
// cached ComputeGraph and ask it for this bundle's ID.
inline DirtyIDType ConstBundlePrim::dirtyID() noexcept
{
auto id = carb::getCachedInterface<ComputeGraph>()->getDirtyIDInterfacePtr(m_bundle->getContext());
return id->getForBundle(m_bundle->getConstHandle());
}
// Const overload. The pragmas silence deprecation warnings raised by this call
// chain (presumably the dirty-ID APIs are deprecated — see the deprecated
// wrappers elsewhere in this file).
inline DirtyIDType ConstBundlePrim::dirtyID() const noexcept
{
CARB_IGNOREWARNING_MSC_WITH_PUSH(4996)
CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wdeprecated-declarations")
return const_cast<ConstBundlePrim*>(this)->dirtyID();
CARB_IGNOREWARNING_GNUC_POP
CARB_IGNOREWARNING_MSC_POP
}
// Parent collection that owns this prim (non-owning pointer).
inline ConstBundlePrims* ConstBundlePrim::getConstBundlePrims() noexcept
{
return m_bundlePrims;
}
// Iterator over the attribute cache: begin/end pairs for both const and
// non-const receivers. The const overloads cast constness away because the
// iterator type requires a mutable prim reference.
inline ConstBundlePrimAttrIterator ConstBundlePrim::begin() noexcept
{
return ConstBundlePrimAttrIterator(*this, getAttributes().begin());
}
inline ConstBundlePrimAttrIterator ConstBundlePrim::end() noexcept
{
return ConstBundlePrimAttrIterator(*this, getAttributes().end());
}
inline ConstBundlePrimAttrIterator ConstBundlePrim::begin() const noexcept
{
ConstBundlePrim& thisPrim = const_cast<ConstBundlePrim&>(*this);
return ConstBundlePrimAttrIterator(thisPrim, thisPrim.getAttributes().begin());
}
inline ConstBundlePrimAttrIterator ConstBundlePrim::end() const noexcept
{
ConstBundlePrim& thisPrim = const_cast<ConstBundlePrim&>(*this);
return ConstBundlePrimAttrIterator(thisPrim, thisPrim.getAttributes().end());
}
// Raw pointer to the wrapped read-only bundle interface.
inline IConstBundle2* ConstBundlePrim::getConstBundlePtr() noexcept
{
return m_bundle.get();
}
// Mutable access to the name -> BundleAttrib cache.
inline ConstBundlePrim::BundleAttributeMap& ConstBundlePrim::getAttributes() noexcept
{
return m_attributes;
}
// ====================================================================================================
//
// Const Bundle Primitives
//
// ====================================================================================================
// Default constructor: creates a detached collection (see attach()).
inline ConstBundlePrims::ConstBundlePrims()
{
}
// Convenience constructor: immediately attaches to the given bundle.
inline ConstBundlePrims::ConstBundlePrims(GraphContextObj const& context,
ConstBundleHandle const& bundle)
: ConstBundlePrims()
{
attach(context, bundle);
}
// Drops all cached prims and resets the collection to the detached state.
// NOTE(review): ObjectPtr::release() typically relinquishes the pointer
// WITHOUT decrementing its reference count (unlike reset()); if m_bundle /
// m_factory own references, this leaks them — confirm reset() was not intended.
inline void ConstBundlePrims::detach() noexcept
{
m_primitives.clear();
m_commonAttributes.reset();
m_context = GraphContextObj{};
m_bundle.release();
m_factory.release();
}
// Mutable access to the cached, index-sorted prim array.
inline ConstBundlePrims::BundlePrimArray& ConstBundlePrims::getPrimitives() noexcept
{
return m_primitives;
}
// Read-only handle of the attached bundle.
inline ConstBundleHandle ConstBundlePrims::getConstHandle() noexcept
{
return m_bundle->getConstHandle();
}
// Returns the prim at primIndex, lazily (re)building the prim cache with the
// supplied factory when the cached array size disagrees with the live
// child-bundle count.
//
// @param[in] primIndex               Index of the prim to fetch.
// @param[in] createSortedBundlePrims Callable returning a BundlePrimArray
//                                    sorted by prim index.
// @return The cached prim, or nullptr when primIndex is out of bounds.
//
// Fix: dropped the redundant std::move around the factory call — it returns a
// prvalue, so the assignment already moves (clang-tidy: performance-move-const-arg).
template <typename FUNC>
ConstBundlePrim* ConstBundlePrims::getConstPrim(BundlePrimIndex primIndex, FUNC createSortedBundlePrims) noexcept
{
    // Return invalid const bundle prim if out of bounds.
    size_t const bundlePrimCount = getPrimCount();
    if (primIndex >= bundlePrimCount)
    {
        return nullptr;
    }
    auto& prims = getPrimitives();
    // HDC_TODO: we need a clear signal to be sure when creation and resorting is required.
    if (prims.size() != bundlePrimCount)
    {
        prims = createSortedBundlePrims();
        // Every slot must be filled after a rebuild.
        CARB_ASSERT(bundlePrimCount ==
                    (size_t)std::count_if(prims.cbegin(), prims.cend(), [](const auto& p) { return p.get(); }));
    }
    return prims[primIndex].get();
}
// Legacy-named alias for getConstPrim(primIndex).
inline ConstBundlePrim* ConstBundlePrims::getPrim(BundlePrimIndex primIndex) noexcept
{
return getConstPrim(primIndex);
}
inline ConstBundlePrim* ConstBundlePrims::getConstPrim(BundlePrimIndex primIndex) noexcept
{
auto createSortedBundlePrims = [this, &bundlePrims = *this]() -> BundlePrimArray
{
const size_t childBundleCount = getConstBundlePtr()->getChildBundleCount();
std::vector<ConstBundleHandle> handles(childBundleCount);
getConstBundlePtr()->getConstChildBundles(handles.data(), handles.size());
const GraphContextObj& graphContext = context();
BundlePrimArray prims(childBundleCount);
BundlePrimArray nonIndexedPrims;
for (ConstBundleHandle& handle : handles)
{
auto childBundle = getBundleFactoryPtr()->getConstBundle(graphContext, handle);
ConstBundlePrim* prim = new ConstBundlePrim(bundlePrims, childBundle);
BundlePrimIndex index = prim->primIndex();
CARB_ASSERT(index < childBundleCount || index == kInvalidBundlePrimIndex);
if (index < childBundleCount)
{
prims[index].reset(prim);
}
else
{
nonIndexedPrims.emplace_back(prim);
}
}
// Merge non-indexed prims into the sorted array.
if (!nonIndexedPrims.empty())
{
BundlePrimIndex index = 0;
for (ConstBundlePrimPtr& nonIndexedPrim : nonIndexedPrims)
{
while (index < childBundleCount)
{
ConstBundlePrimPtr& prim = prims[index++];
if (!prim)
{
prim = std::move(nonIndexedPrim);
break;
}
}
}
}
return prims;
};
return getConstPrim(primIndex, createSortedBundlePrims);
}
// Dirty ID of the bundle itself, read via the common-attributes pseudo-prim.
// The pragmas silence deprecation warnings from the dirty-ID call chain.
inline DirtyIDType ConstBundlePrims::getBundleDirtyID() noexcept
{
CARB_IGNOREWARNING_MSC_WITH_PUSH(4996)
CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wdeprecated-declarations")
return getCommonAttrs().dirtyID();
CARB_IGNOREWARNING_GNUC_POP
CARB_IGNOREWARNING_MSC_POP
}
// Pseudo-prim exposing the attributes stored on the bundle itself
// (as opposed to attributes of its child prims).
inline ConstBundlePrim& ConstBundlePrims::getConstCommonAttrs() noexcept
{
return *m_commonAttributes;
}
// Graph context of the attached bundle. Refreshes the cached m_context on
// every call; yields a default-constructed context when detached.
inline GraphContextObj const& ConstBundlePrims::context() noexcept
{
if (m_bundle)
{
m_context = m_bundle->getContext();
}
else
{
m_context = GraphContextObj{};
}
return m_context;
}
// Attaches to a bundle by handle: resolves the bundle factory from the cached
// ComputeGraph interface, obtains the read-only bundle, and forwards to the
// ObjectPtr overload below.
inline void ConstBundlePrims::attach(GraphContextObj const& context,
ConstBundleHandle const& bundleHandle) noexcept
{
ComputeGraph* computeGraph = carb::getCachedInterface<ComputeGraph>();
omni::core::ObjectPtr<IBundleFactory> factory = computeGraph->getBundleFactoryInterfacePtr();
omni::core::ObjectPtr<IConstBundle2> bundle = factory->getConstBundle(context, bundleHandle);
attach(std::move(factory), std::move(bundle));
}
// Attaches to a bundle given its factory and read-only interface, taking
// ownership of both.
//
// @param[in] factoryPtr Bundle factory used to resolve child bundles later.
// @param[in] bundlePtr  Read-only bundle this collection wraps.
//
// Fix: removed two unused locals ("bundle" and "context") left over at the end
// of the original body; the context() call is kept (cast to void) because it
// has the side effect of refreshing the cached m_context.
inline void ConstBundlePrims::attach(omni::core::ObjectPtr<IBundleFactory>&& factoryPtr,
                                     omni::core::ObjectPtr<IConstBundle2>&& bundlePtr) noexcept
{
    // Initialize members
    m_factory = std::move(factoryPtr);
    m_bundle = std::move(bundlePtr);
    // Initialize common attributes to provide access to ConstBundlePrims attributes.
    m_commonAttributes.reset(new ConstBundlePrim(*this, m_bundle));
    if (!m_bundle->isValid())
    {
        return;
    }
    // TODO: Following code is necessary for backward compatibility.
    // Prime the cached GraphContextObj for the newly attached bundle.
    (void)this->context();
}
// Raw pointer to the bundle factory acquired at attach time.
inline IBundleFactory* ConstBundlePrims::getBundleFactoryPtr() noexcept
{
return m_factory.get();
}
// Raw pointer to the attached read-only bundle (nullptr when detached).
inline IConstBundle2* ConstBundlePrims::getConstBundlePtr() noexcept
{
return m_bundle.get();
}
// Number of child prims, taken live from the bundle; 0 when detached.
inline size_t ConstBundlePrims::getPrimCount() noexcept
{
if (IConstBundle2* bundle = getConstBundlePtr())
{
return bundle->getChildBundleCount();
}
return 0;
}
// Iterator range over the child prims: [0, getPrimCount()).
inline ConstBundlePrimIterator ConstBundlePrims::begin() noexcept
{
return ConstBundlePrimIterator(*this);
}
inline ConstBundlePrimIterator ConstBundlePrims::end() noexcept
{
return ConstBundlePrimIterator(*this, getPrimCount());
}
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future, but are kept for backward compatibility.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
inline ConstBundlePrim& ConstBundlePrims::getCommonAttrs() noexcept
{
return getConstCommonAttrs();
}
inline ConstBundleHandle ConstBundlePrims::handle() noexcept
{
return m_bundle->getConstHandle();
}
//! Deprecated no-op retained so legacy callers keep compiling; attributes are no
//! longer stored in a form that needs separating.
inline void ConstBundlePrims::separateAttrs() noexcept
{
}
//! Deprecated no-op retained for backward compatibility.
//!
//! Attribute caching responsibility was moved to ConstBundlePrim itself, so there is
//! nothing left to do here. The parameter name is commented out to silence
//! -Wunused-parameter warnings in every translation unit that includes this header.
inline void ConstBundlePrims::ensurePrimAttrsCached(BundlePrimIndex /*primIndex*/) noexcept
{
    // Responsibility of caching attributes was moved to Bundle Prim.
}
// ====================================================================================================
//
// Const Bundle Primitive Iterator
//
// ====================================================================================================
inline ConstBundlePrimIterator::ConstBundlePrimIterator(ConstBundlePrims& bundlePrims, BundlePrimIndex primIndex) noexcept
: m_bundlePrims(&bundlePrims), m_primIndex(primIndex)
{
}
//! Two iterators are equal when they walk the same bundle and sit on the same prim index.
inline bool ConstBundlePrimIterator::operator==(ConstBundlePrimIterator const& that) const noexcept
{
    bool const samePrims = (m_bundlePrims == that.m_bundlePrims);
    bool const sameIndex = (m_primIndex == that.m_primIndex);
    return samePrims && sameIndex;
}
//! Inequality: differs in either the underlying bundle or the prim index.
inline bool ConstBundlePrimIterator::operator!=(ConstBundlePrimIterator const& that) const noexcept
{
    return (m_bundlePrims != that.m_bundlePrims) || (m_primIndex != that.m_primIndex);
}
//! Dereferences to the prim at the current index.
inline ConstBundlePrim& ConstBundlePrimIterator::operator*() noexcept
{
    ConstBundlePrim* const prim = m_bundlePrims->getConstPrim(m_primIndex);
    return *prim;
}
//! Member access for the prim at the current index.
inline ConstBundlePrim* ConstBundlePrimIterator::operator->() noexcept
{
    ConstBundlePrim* const prim = m_bundlePrims->getConstPrim(m_primIndex);
    return prim;
}
//! Pre-increment: advances to the next prim and returns this iterator.
inline ConstBundlePrimIterator& ConstBundlePrimIterator::operator++() noexcept
{
    m_primIndex += 1;
    return *this;
}
// ====================================================================================================
//
// Const Bundle Primitive Attribute Iterator
//
// ====================================================================================================
inline ConstBundlePrimAttrIterator::ConstBundlePrimAttrIterator(ConstBundlePrim& bundlePrim, ConstBundlePrim::AttrMapIteratorType attrIter) noexcept
: m_bundlePrim(&bundlePrim), m_attrIter(attrIter)
{
}
//! Two iterators are equal when they belong to the same prim and wrap the same map position.
inline bool ConstBundlePrimAttrIterator::operator==(ConstBundlePrimAttrIterator const& that) const noexcept
{
    bool const samePrim = (m_bundlePrim == that.m_bundlePrim);
    bool const samePosition = (m_attrIter == that.m_attrIter);
    return samePrim && samePosition;
}
//! Inequality: differs in either the owning prim or the map position.
inline bool ConstBundlePrimAttrIterator::operator!=(ConstBundlePrimAttrIterator const& that) const noexcept
{
    return (m_bundlePrim != that.m_bundlePrim) || (m_attrIter != that.m_attrIter);
}
//! Dereferences to the attribute at the current map position; the stored pointer must be non-null.
inline BundleAttrib const& ConstBundlePrimAttrIterator::operator*() const noexcept
{
    auto const& attrPtr = m_attrIter->second;
    CARB_ASSERT(attrPtr);
    return *attrPtr;
}
//! Member access for the attribute at the current map position; the stored pointer must be non-null.
inline BundleAttrib const* ConstBundlePrimAttrIterator::operator->() const noexcept
{
    auto const& attrPtr = m_attrIter->second;
    CARB_ASSERT(attrPtr);
    return attrPtr.get();
}
//! Pre-increment: advances to the next attribute and returns this iterator.
inline ConstBundlePrimAttrIterator& ConstBundlePrimAttrIterator::operator++() noexcept
{
    std::advance(m_attrIter, 1);
    return *this;
}
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/IGatherPrototype.h | // Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/IFlatcache.h>
#include <omni/graph/core/iComputeGraph.h>
#include <tuple>
namespace carb
{
namespace flatcache
{
using ArrayIndex = size_t;
}
}
namespace omni {
namespace graph {
namespace core {
// ====================================================================================================
// This is a temporary ABI interface used for prototyping the use of improved Gather. Some of these functions
// may move to other interfaces once the prototyping is complete. For now, none of them should be used outside
// of the prototyping efforts.
/* _____ _ _ _ _ _
| __ \ | \ | | | | | | | |
| | | | ___ | \| | ___ | |_ | | | |___ ___
| | | |/ _ \ | . ` |/ _ \| __| | | | / __|/ _ \
| |__| | (_) | | |\ | (_) | |_ | |__| \__ \ __/
|_____/ \___/ |_| \_|\___/ \__| \____/|___/\___|
*/
//! Identifies one gathered element: its prim path, the flatcache bucket storing it,
//! and its row index within that bucket.
using PathBucketIndex = std::tuple<carb::flatcache::PathC, carb::flatcache::BucketId, carb::flatcache::ArrayIndex>;
//! Opaque identifier for a set of gathered buckets, returned by IGatherPrototype::gatherPaths.
using GatherId = uint64_t;
//! Sentinel GatherId denoting failure / "no gather".
static constexpr GatherId kInvalidGatherId = 0;
/**
 * Gathered attributes can be automatically converted and copied to Hydra fast-path attributes
 */
enum class GatherAddTransformsMode
{
    eNone, ///< do not add any attributes
    eLocal, ///< _localTransform
    eWorld ///< _worldPosition, _worldOrientation, _worldScale
};
//! Prototype ABI for improved Gather support. Members are Carbonite ABI function
//! pointers: their declaration order IS the binary interface, so new entries may only
//! be appended (see STRUCT_INTEGRITY_CHECK below).
struct IGatherPrototype
{
    CARB_PLUGIN_INTERFACE("omni::graph::core::IGatherPrototype", 2, 0);
    /**
     * Adds the given paths to FC and updates the FC sync filter for this Graph. The prims may be tagged to ensure
     * the buckets do not contain unrelated prims. The returned GatherId identifies the FC buckets.
     *
     * After this call, the given paths will be present in FC, in a set of buckets which contain only these paths. This
     * is necessary to allow vectorized access to an attribute.
     *
     * @note The order of the paths supplied to this function may not be the order of the same paths in the Gather. Use
     * getGatheredPaths() to get the gather-order of gathered prims.
     *
     * @note The returned GatherId can become invalidated if the underlying flatcache data changes, use isGatherValid()
     * to check.
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] paths array of prim paths to add
     * @param[in] numPaths number of elements in paths
     * @param[in] allAttributes when true, all USD attributes will be gathered. When false, the "attributes" parameter
     * is used.
     * @param[in] attributes array of attribute names to add to FC, when allAttributes is false
     * @param[in] numAttributes number of elements in attributes array
     * @param[in] addTransformsMode The transform attributes to create if any
     * @param[in] shouldWriteBack Flag to request to write back cached data to USD
     * @param[in] forceExportToHistory when true, all gathered paths will be tagged for being exported into the history
     *
     * @return The gather id corresponding to the gathered buckets, or kInvalidGatherId on failure.
     */
    GatherId(CARB_ABI* gatherPaths)(const GraphContextObj&,
                                    const carb::flatcache::PathC* paths,
                                    size_t numPaths,
                                    bool allAttributes,
                                    NameToken const* attributes,
                                    size_t numAttributes,
                                    GatherAddTransformsMode addTransformsMode,
                                    bool shouldWriteBack,
                                    bool forceExportToHistory);
    /**
     * Gets the array of paths in the given Gather, in gather order.
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] gatherId The Gather id returned from gatherPaths()
     * @param[out] refToPaths reference to the array of prim paths, in gather-order. This pointer is volatile, do not
     * save. It can become invalid the next time something changes in flatcache.
     * @param[out] refToSize number of elements in refToPaths
     *
     * @return true if the Gather was found, false if it was not
     */
    bool (CARB_ABI*getGatheredPaths)(const GraphContextObj&, GatherId gatherId,
                                     carb::flatcache::PathC const*& refToPaths, size_t& refToSize);
    /**
     * Gets the array of buckets in the given Gather, in gather order.
     * For example if the gathered paths are P1,P2,P3,P4 which span buckets B1, B2, we know that the sum of the
     * sizes of B1 and B2 must be 4. If B1 has 2 entries we know where the paths are located:
     *
     *   B1
     *     row 0: P1
     *     row 1: P2
     *
     *   B2
     *     row 0: P3
     *     row 1: P4
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] gatherId The Gather id returned from gatherPaths()
     * @param[out] refToBucketArray reference to the array of BucketIds, in gather-order. This pointer is volatile, do not
     * save. It can become invalid the next time something changes in flatcache.
     * @param[out] refToSize number of elements in refToBucketArray
     *
     * @return true if the Gather was found, false if it was not
     */
    bool(CARB_ABI* getGatheredBuckets)(const GraphContextObj&,
                                       GatherId gatherId,
                                       BucketId const*& refToBucketArray,
                                       size_t& refToSize);
    /**
     * Gets the size information for the given gathered attribute name.
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] gatherId The Gather id returned from gatherPaths()
     * @param[in] attributeName name of the gathered attribute
     * @param[out] type The type information
     * @param[out] baseSizeBytes The size of the base elements of the Type
     * FIXME: Should be accessible from flatcache ABI?
     * @return true if the attribute was found, false if it was not
     */
    bool(CARB_ABI* getGatheredType)(const GraphContextObj&, GatherId gatherId, NameToken attributeName, Type& type, size_t& baseSizeBytes);
    // ==============================================================================================================
    // The functions below here are temporary replacements for the same functions removed from IGraphContext. They
    // were being modified to use attributes, causing a breaking change, and since they were going to be changed anyway
    // they were moved to this prototype for easier update later.
    // ==============================================================================================================
    /**
     * Given an attribute connected to a gather node, retrieves the attribute value in system memory
     * Deprecated: to be replaced with the 2.0 API soon
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The attribute structure to which the operation applies
     * @param[in] flags The data access flags indicating whether the data is to be R, W, or RW
     * @return the void pointer to the data
     */
    void*(CARB_ABI* getGatherArray)(const GraphContextObj& context, const AttributeObj& attrObj, DataAccessFlags flags);
    /**
     * Given an attribute connected to a gather node, retrieves the attribute value in GPU memory
     * Deprecated: to be replaced with the 2.0 API soon
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The attribute structure to which the operation applies
     * @param[in] flags The data access flags indicating whether the data is to be R, W, or RW
     * @return the void pointer to the data
     */
    void*(CARB_ABI* getGatherArrayGPU)(const GraphContextObj& context, const AttributeObj& attrObj, DataAccessFlags flags);
    /**
     * Given an attribute connected to a gather node, retrieves the array of the paths it has gathered
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The source attribute structure. This can be any of the gather node's attributes.
     * @return the pointer to the path array, encoded as uint64_t. These integers can be cast to SdfPaths.
     */
    const uint64_t*(CARB_ABI* getGatherPathArray)(const GraphContextObj& context, const AttributeObj& attrObj);
    /**
     * Given an array attribute in a gather node, retrieves an array of the array sizes in CPU memory
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The source attribute structure.
     * @param[in] flags The data access flags indicating whether the data is to be R, W, or RW
     * @return the pointer to the array
     */
    size_t*(CARB_ABI* getGatherArrayAttributeSizes)(const GraphContextObj& context,
                                                    const AttributeObj& attrObj,
                                                    DataAccessFlags flags);
    /**
     * Given an array attribute in a gather node, retrieves an array of the array sizes in GPU memory
     * Deprecated: to be replaced with the 2.0 API soon
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The source attribute structure.
     * @param[in] flags The data access flags indicating whether the data is to be R, W, or RW
     * @return the pointer to the array
     */
    size_t*(CARB_ABI* getGatherArrayAttributeSizesGPU)(const GraphContextObj& context,
                                                       const AttributeObj& attrObj,
                                                       DataAccessFlags flags);
    /**
     * Given a gather node attribute, retrieves the number of elements
     * Deprecated: to be replaced with the 2.0 API soon
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The attribute structure to which the operation applies
     * @return the number of elements gathered, -1 if operation is unsuccessful
     */
    size_t(CARB_ABI* getElementCount)(const GraphContextObj& context, const AttributeObj& attrObj);
    /**
     * Gets the array of repeated paths and where to find its data in the given Gather.
     * Each element in the array will have the path of the prim, the bucket id of its data, and the index of its
     * position inside the bucket.
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] gatherId The Gather id returned from gatherPaths()
     * @param[out] refToRepeatedPathsArray reference to the array of PathBucketIndex which outlines the location of the
     *                                     data for repeated paths in the gather. This pointer is volatile, do not
     *                                     save. It can become invalid the next time something changes in flatcache.
     * @param[out] refToSize number of elements in refToRepeatedPathsArray
     *
     * @return true if the Gather was found, false if it was not
     */
    bool(CARB_ABI* getGatheredRepeatedPaths)(const GraphContextObj&,
                                             GatherId gatherId,
                                             PathBucketIndex const*& refToRepeatedPathsArray,
                                             size_t& refToSize);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle.
// NOTE(review): the final argument (10) is presumably the zero-based position of getGatheredRepeatedPaths
// within the struct -- verify against the STRUCT_INTEGRITY_CHECK definition when members change.
STRUCT_INTEGRITY_CHECK(IGatherPrototype, getGatheredRepeatedPaths, 10)
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/CudaUtils.h | // Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// This file contains support for CUDA or CUDA/C++ common code
// TODO: find out how to assert in CUDA
#ifndef __CUDACC__
// Host-only compilation: the assert is active and no device annotations are needed.
# define CUDA_SAFE_ASSERT(cond, ...) CARB_ASSERT(cond, ##__VA_ARGS__)
# define CUDA_CALLABLE
#else
// CUDA compilation: asserting from device code is not supported here (see TODO above),
// so the assert expands to nothing; CUDA_CALLABLE marks a function as callable from
// both device and host code.
# define CUDA_SAFE_ASSERT(cond, ...)
# define CUDA_CALLABLE __device__ __host__
#endif
|
omniverse-code/kit/include/omni/graph/core/IBundleFactory.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "bundle/IBundleFactory2.h"
|
omniverse-code/kit/include/omni/graph/core/IConstBundle.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "bundle/IConstBundle2.h"
|
omniverse-code/kit/include/omni/graph/core/NodeTypeRegistrar.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "iComputeGraph.h"
#include <carb/Framework.h>
#include <iostream>
#include <type_traits>
//====================================================================================================
//
// Node type registration is handled through this set of classes. The implementation is geared towards simplifying
// the registration process as much as possible. All of this is support code for the macro "REGISTER_OGN_NODE()",
// which you place at the bottom of your .ogn file.
//
//====================================================================================================
namespace omni
{
namespace graph
{
namespace core
{
// If a node type version is not specified then it gets this value.
// Keep in sync with DEFAULT_NODE_TYPE_VERSION_DEFAULT in omni.graph.core/python/tests/omnigraph_test_utils.py
static constexpr int kDefaultNodeTypeVersion = 1;
// See this file for how to handle new methods added to the iNodeType interface.
#define NODE_TYPE_REGISTRY // For inclusion protection
#include "NodeTypeRegistryTemplates.h"
// Scoped node registration helper class. Used by the NodeTypeRegistrar to pass in node methods for the
// INodeType ABI, keeping the node type registered for the lifetime of the instantiated object. Only one
// registration object can exist per node type so copy methods are deleted in favour of their move equivalents.
class NodeTypeRegistration
{
public:
    //! Direct access to the interface, for the manual registration modes.
    const INodeType& nodeTypeInterface() const
    {
        return m_interface;
    }
    //! Direct access to the version, for the manual registration modes.
    //! (Top-level const dropped from the return type; it had no effect on callers.)
    int nodeTypeVersion() const
    {
        return m_versionNumber;
    }
    // Move constructor allows passing of registration information through return values without double
    // register/unregister. The source is reset to an empty interface/default version.
    NodeTypeRegistration(NodeTypeRegistration&& rhs) noexcept
    {
        m_interface = rhs.m_interface;
        m_versionNumber = rhs.m_versionNumber;
        rhs.m_interface = INodeType{};
        rhs.m_versionNumber = kDefaultNodeTypeVersion;
    }
    // Move operator allows passing of registration information through return values without double
    // register/unregister. Guards against self-move, which previously wiped the interface data.
    NodeTypeRegistration& operator=(NodeTypeRegistration&& rhs) noexcept
    {
        if (this != &rhs)
        {
            m_interface = rhs.m_interface;
            m_versionNumber = rhs.m_versionNumber;
            rhs.m_interface = INodeType{};
            rhs.m_versionNumber = kDefaultNodeTypeVersion;
        }
        return *this;
    }
    // Only available constructor takes in the methods required by the INodeType API and fills
    // the interface struct from them. Only the name function is remembered for later unregistration.
    NodeTypeRegistration(GetNodeTypeFunction nameFn,
                         ComputeFunction computeFn,
                         InitializeFunction initializeFn,
                         ReleaseFunction releaseFn,
                         InitializeTypeFunction initializeTypeFn,
                         UpdateNodeVersionFunction updateNodeVersionFn,
                         AddInputFunction addInputFn,
                         AddExtendedInputFunction addExtendedInputFn,
                         AddOutputFunction addOutputFn,
                         AddExtendedOutputFunction addExtendedOutputFn,
                         AddStateFunction addStateFn,
                         AddExtendedStateFunction addExtendedStateFn,
                         HasStateFunction hasStateFn,
                         RegisterTasksFunction registerTasksFn,
                         GetAllMetadataFunction getAllMetadataFn,
                         GetMetadataFunction getMetadataFn,
                         GetMetadataCountFunction getMetadataCountFn,
                         SetMetadataFunction setMetadataFn,
                         GetScheduleNodeCountFunction getScheduleNodeCountFn,
                         GetScheduleNodesFunction getScheduleNodesFn,
                         OnConnectionTypeResolveFunction onConnectionTypeResolveFn,
                         InspectFunction inspectFn,
                         ComputeVectorizedFunction computeVectorized,
                         ReleaseInstanceFunction releaseInstance,
                         int versionNumber)
    {
        m_versionNumber = versionNumber;
        m_interface.addInput = addInputFn;
        m_interface.addExtendedInput = addExtendedInputFn;
        m_interface.addOutput = addOutputFn;
        m_interface.addExtendedOutput = addExtendedOutputFn;
        m_interface.addState = addStateFn;
        m_interface.addExtendedState = addExtendedStateFn;
        m_interface.compute = computeFn;
        m_interface.getNodeType = nameFn;
        m_interface.getScheduleNodes = getScheduleNodesFn;
        m_interface.getScheduleNodeCount = getScheduleNodeCountFn;
        m_interface.hasState = hasStateFn;
        m_interface.initialize = initializeFn;
        m_interface.initializeType = initializeTypeFn;
        m_interface.registerTasks = registerTasksFn;
        m_interface.getAllMetadata = getAllMetadataFn;
        m_interface.getMetadata = getMetadataFn;
        m_interface.getMetadataCount = getMetadataCountFn;
        m_interface.setMetadata = setMetadataFn;
        m_interface.release = releaseFn;
        m_interface.updateNodeVersion = updateNodeVersionFn;
        m_interface.onConnectionTypeResolve = onConnectionTypeResolveFn;
        m_interface.inspect = inspectFn;
        m_interface.computeVectorized = computeVectorized;
        m_interface.releaseInstance = releaseInstance;
        // Report the Carbonite ABI version this registration was compiled against.
        m_interface.getCarbABIVersion = []() { return INodeType::getInterfaceDesc().version; };
    }
    // Node registrations must be unique
    NodeTypeRegistration() = delete;
    NodeTypeRegistration(const NodeTypeRegistration&) = delete;
    NodeTypeRegistration& operator=(const NodeTypeRegistration&) = delete;

private:
    INodeType m_interface = {}; // Interface created for this node type
    int m_versionNumber{ kDefaultNodeTypeVersion }; // Node type's registered version number
};
// Template class from which nodes can derive to automate their registration.
//
// Template class from which nodes can derive to automate their registration.
template <typename T>
class NodeTypeRegistrar
{
    static const char* s_nodeTypeName; // Name used if the node does not have a getNodeTypeName method

public:
    //! Register the node type, returning a scoped registration object. Your node type will be registered and
    //! available for the lifetime of the returned object.
    //!
    //! @param name                   Node type name, used when T does not supply its own getter
    //! @param versionNumber          Version to register the node type with
    //! @param initializeTypeOverride Optional replacement for T's initializeType; pass nullptr to use T's own
    static NodeTypeRegistration registerNode(const char* name,
                                             int versionNumber,
                                             InitializeTypeFunction initializeTypeOverride)
    {
        s_nodeTypeName = name;
        // Fall back to the static name getter when the node class has no getNodeTypeName method.
        auto nodeTypeNameGetter = getNodeTypeFunction<T>();
        if (!nodeTypeNameGetter)
        {
            nodeTypeNameGetter = &getNodeTypeName;
        }
        // Return the prvalue directly: wrapping it in std::move would be a pessimizing
        // move that disables copy elision (clang -Wpessimizing-move).
        return NodeTypeRegistration(
            nodeTypeNameGetter, computeFunction<T>(), initializeFunction<T>(), releaseFunction<T>(),
            initializeTypeOverride ? initializeTypeOverride : initializeTypeFunction<T>(),
            updateNodeVersionFunction<T>(), addInputFunction<T>(), addExtendedInputFunction<T>(),
            addOutputFunction<T>(), addExtendedOutputFunction<T>(), addStateFunction<T>(), addExtendedStateFunction<T>(),
            hasStateFunction<T>(), registerTasksFunction<T>(), getAllMetadataFunction<T>(), getMetadataFunction<T>(),
            getMetadataCountFunction<T>(), setMetadataFunction<T>(), getScheduleNodeCountFunction<T>(), getScheduleNodesFunction<T>(),
            onConnectionTypeResolveFunction<T>(), inspectFunction<T>(), computeVectorizedFunction<T>(), releaseInstanceFunction<T>(),
            versionNumber);
    }
    //! Returns the name the node type was registered under.
    static const char* getNodeTypeName()
    {
        return s_nodeTypeName;
    }
};
// When the template instantiates the class it will also instantiate this static member
template <typename NodeType>
const char* NodeTypeRegistrar<NodeType>::s_nodeTypeName{ nullptr };
// Macro that simplifies the syntax of node registration, creating a scoped object for automatic register/unregister
// that can be accessed through the consistently named method "nodeTypeRegistrationMYNODENAME" to call its
// registerNodeType and unregisterNodeType methods at the appropriate time.
#define REGISTER_NODE_TYPE(NODE_CLASS, NODE_TYPE_NAME, NODE_TYPE_VERSION) \
const omni::graph::core::NodeTypeRegistration& nodeTypeRegistration##NODE_CLASS() \
{ \
static omni::graph::core::NodeTypeRegistration s_nodeRegistration{ \
omni::graph::core::NodeTypeRegistrar<NODE_CLASS>::registerNode(NODE_TYPE_NAME, NODE_TYPE_VERSION, nullptr) \
}; \
return s_nodeRegistration; \
}
}
}
}
// Variant of REGISTER_NODE_TYPE for node classes declared inside a namespace: NAMESPACE qualifies
// NODE_CLASS when instantiating the registrar, while the generated accessor keeps the unqualified
// nodeTypeRegistration##NODE_CLASS name.
#define REGISTER_NAMESPACED_NODE_TYPE(NODE_CLASS, NAMESPACE, NODE_TYPE_NAME, NODE_TYPE_VERSION)                        \
    const omni::graph::core::NodeTypeRegistration& nodeTypeRegistration##NODE_CLASS()                                  \
    {                                                                                                                  \
        static omni::graph::core::NodeTypeRegistration s_nodeRegistration{                                             \
            omni::graph::core::NodeTypeRegistrar<NAMESPACE ::NODE_CLASS>::registerNode(                                \
                NODE_TYPE_NAME, NODE_TYPE_VERSION, nullptr)                                                            \
        };                                                                                                             \
        return s_nodeRegistration;                                                                                     \
    }
|
omniverse-code/kit/include/omni/graph/core/IBundleFactory.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! NOTE(review): this file is generated by omni.bind -- doc fixes below should also be made
//! in the binding source or they will be lost on regeneration.
template <>
class omni::core::Generated<omni::graph::core::IBundleFactory_abi> : public omni::graph::core::IBundleFactory_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::IBundleFactory")
    /**
     * Create bundles at given paths and acquire instances of IBundle2 interface.
     *
     * @param[in] contextObj The context where bundles are created.
     * @param[in] paths Locations for new bundles.
     * @param[in] pathCount Length of paths array.
     * @param[out] createdBundles Output instances of IBundle2 interface.
     * @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
     * invalid.
     */
    omni::core::Result createBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                     const carb::flatcache::PathC* const paths,
                                     size_t pathCount,
                                     omni::graph::core::IBundle2** const createdBundles) noexcept;
    /**
     * Acquire instances of IConstBundle2 interface from const bundle handles.
     *
     * @param[in] contextObj The context where bundles belong to.
     * @param[in] bundleHandles The bundle handles.
     * @param[in] bundleCount Length of bundleHandles array.
     * @param[out] bundles Output instances of IConstBundle2 interface.
     * @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
     * invalid.
     */
    omni::core::Result getConstBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                       const omni::graph::core::ConstBundleHandle* const bundleHandles,
                                       size_t bundleCount,
                                       omni::graph::core::IConstBundle2** const bundles) noexcept;
    /**
     * Acquire instances of IBundle2 interface from bundle handles.
     *
     * @param[in] contextObj The context where bundles belong to.
     * @param[in] bundleHandles The bundle handles.
     * @param[in] bundleCount Length of bundleHandles array.
     * @param[out] bundles Output instances of IBundle2 interface.
     * @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
     * invalid.
     */
    omni::core::Result getBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                  const omni::graph::core::BundleHandle* const bundleHandles,
                                  size_t bundleCount,
                                  omni::graph::core::IBundle2** const bundles) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
//! Forwards to the ABI implementation (generated pass-through).
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::createBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const carb::flatcache::PathC* const paths,
    size_t pathCount,
    omni::graph::core::IBundle2** const createdBundles) noexcept
{
    return createBundles_abi(contextObj, paths, pathCount, createdBundles);
}
//! Forwards to the ABI implementation (generated pass-through).
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::getConstBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::graph::core::ConstBundleHandle* const bundleHandles,
    size_t bundleCount,
    omni::graph::core::IConstBundle2** const bundles) noexcept
{
    return getConstBundles_abi(contextObj, bundleHandles, bundleCount, bundles);
}
//! Forwards to the ABI implementation (generated pass-through).
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::getBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::graph::core::BundleHandle* const bundleHandles,
    size_t bundleCount,
    omni::graph::core::IBundle2** const bundles) noexcept
{
    return getBundles_abi(contextObj, bundleHandles, bundleCount, bundles);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/core/INodeCategories.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/core/IObject.h>
namespace omni {
namespace graph {
namespace core {
//! Declare the INodeCategories interface definition
OMNI_DECLARE_INTERFACE(INodeCategories);
/** Interface to the list of categories that a node type can belong to */
/** Interface to the list of categories that a node type can belong to.
 *  ABI class: the OMNI_ATTR annotations drive omni.bind code generation, so the
 *  declarations below must not be reordered or reformatted beyond comments.
 */
class INodeCategories_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.INodeCategories")>
{
protected:
    /**
     * Get the number of categories available
     *
     * @returns Count of fixed category types
     */
    virtual size_t getCategoryCount_abi() noexcept = 0;
    /**
     * Get the list of available categories and their descriptions.
     *
     * The caller is responsible for allocating and destroying buffers large enough to hold "bufferSize" results.
     * If bufferSize > getCategoryCount() then the entries at the ends of the buffers will be filled with nullptr.
     *
     * @param[out] categoryNameBuffer List of category names
     * @param[out] categoryDescriptionBuffer List of category descriptions corresponding to the names
     * @param[in] bufferSize Number of entries to fill in the buffers
     *
     * @return true if the category buffer was successfully filled and the bufferSize matched the category count
     */
    virtual OMNI_ATTR("no_py") bool getCategories_abi(
        OMNI_ATTR("*c_str, out, not_null, count=bufferSize") char const** categoryNameBuffer,
        OMNI_ATTR("*c_str, out, not_null, count=bufferSize") char const** categoryDescriptionBuffer,
        size_t bufferSize
    ) noexcept = 0;
    /**
     * Define a new category
     *
     * @param[in] categoryName Name of the new category
     * @param[in] categoryDescription Description of the category
     *
     * @return false if there was already a category with the given name
     */
    virtual bool defineCategory_abi(
        OMNI_ATTR("c_str, in, not_null") char const* categoryName,
        OMNI_ATTR("c_str, in, not_null") char const* categoryDescription
    ) noexcept = 0;
    /**
     * Remove an existing category, mainly to manage the ones created by a node type for itself
     *
     * @param[in] categoryName Name of the category to remove
     *
     * @return false if there was no category with the given name
     */
    virtual bool removeCategory_abi(
        OMNI_ATTR("c_str, in, not_null") char const* categoryName
    ) noexcept = 0;
};
} // namespace core
} // namespace graph
} // namespace omni
#include "INodeCategories.gen.h" // generated file
|
omniverse-code/kit/include/omni/graph/core/PyIBundleFactory.gen.h | // Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
//! Creates the Python bindings for omni::graph::core::IBundleFactory.
//! NOTE(review): generated by the build system -- changes will be lost on regeneration.
auto bindIBundleFactory(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    // The generated ABI wrapper is bound first (as the hidden "_IBundleFactory") so the
    // concrete interface class can list it as a base.
    py::class_<omni::core::Generated<omni::graph::core::IBundleFactory_abi>,
               omni::core::ObjectPtr<omni::core::Generated<omni::graph::core::IBundleFactory_abi>>, omni::core::IObject>
        clsParent(m, "_IBundleFactory");
    py::class_<omni::graph::core::IBundleFactory, omni::core::Generated<omni::graph::core::IBundleFactory_abi>,
               omni::core::ObjectPtr<omni::graph::core::IBundleFactory>, omni::core::IObject>
        cls(m, "IBundleFactory");
    // Constructor from an existing IObject: casts to IBundleFactory, raising on failure.
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::IBundleFactory>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));
    // Default constructor: instantiates the registered implementation via the type factory.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::IBundleFactory>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::IBundleFactory instantiation");
            }
            return tmp;
        }));
    return omni::python::PyBind<omni::graph::core::IBundleFactory>::bind(cls);
}
|
omniverse-code/kit/include/omni/graph/core/iComputeGraph.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/events/IEvents.h>
#include <carb/Defines.h>
#include <carb/Interface.h>
#include <carb/Types.h>
#include <omni/fabric/Enums.h>
#include <omni/fabric/IPath.h>
#include <omni/fabric/IToken.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/Handle.h>
#include <omni/graph/core/ISchedulingHints2.h>
#include <omni/graph/core/ogn/Types.h>
#include <omni/graph/core/IVariable2.h>
#include <omni/graph/core/iAttributeData.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/core/bundle/IBundle1.h>
#include <omni/inspect/IInspector.h>
#include <cstddef>
// Interfaces pulled out of this file but which are still referenced through it
#include "IGraphRegistry.h"
#ifdef __CUDA_ARCH__
#error iComputeGraph.h cannot be included from a .cu file due to a compiler problem. You probably want Handle.h.
#endif
namespace omni
{
namespace graph
{
namespace core
{
//! If 1 then extra logging is enabled (which affects performance)
#define COMPUTE_GRAPH_VERBOSE_LOGGING 0
// ==============================================================================================================
/** Encapsulates the information required to define a file format version number */
struct FileFormatVersion
{
    int majorVersion; //!< Major version, for which changes mean incompatible formats
    int minorVersion; //!< Minor version, for which changes mean compatible formats, possibly with auto-upgrades
    /**
     * @brief Equality operator for the file format version object
     *
     * @param rhs Version number to compare against
     * @return true The version number is equal to @p rhs
     * @return false The version number is not equal to @p rhs
     */
    bool operator==(const FileFormatVersion& rhs) const
    {
        return rhs.majorVersion == majorVersion && rhs.minorVersion == minorVersion;
    }
    /**
     * @brief Inequality operator for the file format version object
     *
     * @param rhs Version number to compare against
     * @return true The version number is not equal to @p rhs
     * @return false The version number is equal to @p rhs
     */
    bool operator!=(const FileFormatVersion& rhs) const
    {
        return !(*this==rhs);
    }
    /**
     * @brief Less-than operator for the file format version object.
     *        Compares lexicographically: major version first, then minor.
     *
     * @param rhs Version number to compare against
     * @return true The version number is less than @p rhs
     * @return false The version number is greater than or equal to @p rhs
     */
    bool operator<(const FileFormatVersion& rhs) const
    {
        return majorVersion < rhs.majorVersion || ((majorVersion == rhs.majorVersion) && (minorVersion < rhs.minorVersion));
    }
    /**
     * @brief Greater-than operator for the file format version object.
     *        Compares lexicographically: major version first, then minor.
     *
     * @param rhs Version number to compare against
     * @return true The version number is greater than @p rhs
     * @return false The version number is less than or equal to @p rhs
     */
    bool operator>(const FileFormatVersion& rhs) const
    {
        return majorVersion > rhs.majorVersion || ((majorVersion == rhs.majorVersion) && (minorVersion > rhs.minorVersion));
    }
};
struct GraphObj;
// ==============================================================================================================
/** Callback object to instantiate for use as a callback when an older version of an OmniGraph file is read */
struct FileFormatUpgrade
{
    /**
     * Callback function definition. Parameters are
     *
     * - oldVersion Version of the file being read
     * - newVersion Current version of the file format
     * - graphObj Graph to which the upgrade applies
     * - userData User data to pass to the callback
     */
    void (*fileFormatUpgradeCallback)(const FileFormatVersion& oldVersion,
                                      const FileFormatVersion& newVersion,
                                      GraphObj& graphObj,
                                      void* userData);

    /** Opaque pointer handed back to the callback on every invocation */
    void* userData;

    /**
     * @brief Equality operator: two upgrade callbacks are equal when both the
     *        function pointer and the user-data pointer match.
     *
     * @param rhs Callback object to compare against
     * @return true iff this object equals @p rhs
     */
    bool operator==(const FileFormatUpgrade& rhs) const
    {
        return (fileFormatUpgradeCallback == rhs.fileFormatUpgradeCallback) && (userData == rhs.userData);
    }

    /**
     * @brief Inequality operator, the negation of equality
     *
     * @param rhs Callback object to compare against
     * @return true iff this object differs from @p rhs
     */
    bool operator!=(const FileFormatUpgrade& rhs) const
    {
        return !operator==(rhs);
    }
};
// ==============================================================================================================
/**
 * @brief Encapsulation of a callback that happens when a node's error status changes
 *
 */
struct ErrorStatusChangeCallback
{
    /**
     * Callback function definition. Parameters are
     *
     * - nodeList List of nodes whose error status changed since the last compute
     * - graphObj Graph to which the nodes belong
     * - userData User data to pass to the callback
     */
    void (*errorStatusChangeCallback)(const std::vector<NodeObj>& nodeList, GraphObj& graphObj, void* userData);

    /** Opaque pointer handed back to the callback on every invocation */
    void* userData;

    /**
     * @brief Equality operator: two callback objects are equal when both the
     *        function pointer and the user-data pointer match.
     *
     * @param rhs Callback object to compare against
     * @return true iff this object equals @p rhs
     */
    bool operator==(const ErrorStatusChangeCallback& rhs) const
    {
        const bool sameFunction = (errorStatusChangeCallback == rhs.errorStatusChangeCallback);
        const bool sameUserData = (userData == rhs.userData);
        return sameFunction && sameUserData;
    }

    /**
     * @brief Inequality operator, the negation of equality
     *
     * @param rhs Callback object to compare against
     * @return true iff this object differs from @p rhs
     */
    bool operator!=(const ErrorStatusChangeCallback& rhs) const
    {
        return !operator==(rhs);
    }
};
// ==============================================================================================================
/** Type of connection between two attributes (carried alongside the attribute in a ConnectionInfo) */
enum ConnectionType
{
    kConnectionType_Regular = 0, //!< Normal attribute to attribute evaluation connection
    kConnectionType_DataOnly = 1, //!< Data only connection, not implying evaluation
    kConnectionType_Execution = 2, //!< Execution type connection, for execution type attributes
    kConnectionType_Bundle = 3, //!< Bundle to bundle connections
    kConnectionType_PureRelationship = 4 //!< Only establish a relationship, no execution or data passed
};
// ==============================================================================================================
/** Extended type of an attribute (see IAttribute::getExtendedType and IAttribute::getResolvedType) */
enum ExtendedAttributeType
{
    kExtendedAttributeType_Regular = 0, //!< No extended type, just a normal strongly typed attribute
    kExtendedAttributeType_Union = 1, //!< An attribute that could be any one of a specific list of types
    kExtendedAttributeType_Any = 2 //!< An attribute that can be any legal type
};
// ==============================================================================================================
/** Port type of an attribute: input, output, or state (see IAttribute::getPortType) */
enum AttributePortType
{
    kAttributePortType_Input = 0, //!< The attribute is an input
    kAttributePortType_Output = 1, //!< The attribute is an output
    kAttributePortType_State = 2, //!< The attribute holds state information
    kAttributePortType_Unknown = 3 //!< The port type is currently unknown
};
// ==============================================================================================================
/**
 * The kind of backing for the graph - can be one of FC shared, with history,
 * or without history. The shared FC means the orchestration graph (the graph
 * containing all other graphs as nodes) has a stage with history, and all
 * other global level graphs (which are nodes in this orchestration graph) share
 * this stage with history FC. The shared FC also applies to subgraphs that share
 * the same FC as their parent graph. The stage with history setting is self explanatory,
 * but note there can only be 1 stage with history currently. The stage without
 * history uses a separate FC to house the data for the graph.
 * The "None" backing type is for orchestration graphs (graphs that hold other graphs
 * as nodes) that don't necessarily need a cache to hold "real" data.
 */
enum GraphBackingType
{
    kGraphBackingType_FabricShared = 0, //!< Graph backing is a shared copy of Fabric
    kGraphBackingType_FabricWithHistory = 1, //!< Use the Fabric instantiation that has history
    kGraphBackingType_FabricWithoutHistory = 2, //!< Use the Fabric instantiation that has no retained history
    kGraphBackingType_Unknown = 3, //!< Backing type is currently unknown
    kGraphBackingType_None = 4, //!< There is no backing for the OmniGraph data
    kGraphBackingType_FlatCacheShared = 0, //!< @private Deprecated, use kGraphBackingType_FabricShared
    kGraphBackingType_FlatCacheWithHistory = 1, //!< @private Deprecated, use kGraphBackingType_FabricWithHistory
    kGraphBackingType_FlatCacheWithoutHistory = 2, //!< @private Deprecated, use kGraphBackingType_FabricWithoutHistory
};
// ==============================================================================================================
/**
 * The pipeline stage defines where this graph is going to be used - as part of
 * simulation (before rendering), pre-rendering (after sim but before render), or
 * post-rendering. Each pipeline stage will have a set of graphs that will
 * be run there. We use larger numbers for the enums with spaces in between
 * so that other stages can be inserted in the future. The custom pipeline stage
 * allows for graphs that can be run at any unknown time.
 */
enum GraphPipelineStage
{
    kGraphPipelineStage_Simulation = 10, //!< The simulation phase, a.k.a. normal evaluation
    kGraphPipelineStage_PreRender = 20, //!< The preRender phase, run just before Hydra takes over
    kGraphPipelineStage_PostRender = 30, //!< The postRender phase, run after Hydra finishes
    kGraphPipelineStage_Unknown = 100, //!< The phase is currently unknown
    kGraphPipelineStage_OnDemand = 200, //!< The graph evaluates only on demand, not as part of the pipeline
    // NOTE(review): five enumerators are declared above but Count is 4 - presumably
    // Unknown is excluded from the count of "real" pipelines; confirm before using
    // this value to size any array indexed by pipeline stage.
    kGraphPipelineStage_Count = 4 //!< The number of existing pipelines
};
// ==============================================================================================================
/**
 * The graph evaluation mode specifies whether a graph is intended to self-evaluate,
 * or if it represents an asset to be evaluated on behalf of a different Prim.
 */
enum class GraphEvaluationMode
{
    //! In Automatic mode, the graph is evaluated as Standalone, unless an OmniGraphAPI interface has a relationship to it,
    //! in which case it is evaluated as Instanced mode.
    Automatic = 0,
    //! In Standalone mode, the graph is evaluated once, with itself as the graph target
    Standalone = 1,
    //! In Instanced mode, the graph is evaluated once for each OmniGraphAPI interface with a relationship to the graph Prim.
    //! Graphs that are used as assets should use this mode to prevent standalone execution.
    Instanced = 2
};
// ==============================================================================================================
/** Information passed to define the opposite end of a connection (see IAttribute::connectAttrsEx) */
struct ConnectionInfo
{
    AttributeObj attrObj; //!< Attribute at the opposite end of the connection
    ConnectionType connectionType; //!< Type of connection being made (regular, data-only, execution, bundle, ...)
};
// ==============================================================================================================
/** Callback object used when a connection is made or broken between two attributes */
struct ConnectionCallback
{
    /**
     * Callback function definition. Parameters are
     *
     * - srcAttr Source end of the connection that changed
     * - dstAttr Destination end of the connection that changed
     * - userData User data to pass to the callback
     */
    void (*connectionCallback)(const AttributeObj& srcAttr, const AttributeObj& dstAttr, void* userData);

    /** Opaque pointer handed back to the callback on every invocation */
    void* userData;

    /**
     * @brief Equality operator: two callback objects are equal when both the
     *        function pointer and the user-data pointer match.
     *
     * @param rhs Callback object to compare against
     * @return true iff this object equals @p rhs
     */
    bool operator==(const ConnectionCallback& rhs) const
    {
        return (connectionCallback == rhs.connectionCallback) && (userData == rhs.userData);
    }

    /**
     * @brief Inequality operator, the negation of equality
     *
     * @param rhs Callback object to compare against
     * @return true iff this object differs from @p rhs
     */
    bool operator!=(const ConnectionCallback& rhs) const
    {
        return !operator==(rhs);
    }
};
// ==============================================================================================================
/** Callback object used when a path has changed, requiring a path attribute update */
struct PathChangedCallback
{
    /**
     * Callback function definition. Parameters are
     *
     * - paths Array of paths that have changed
     * - numPaths Number of paths in the array
     * - userData User data to pass to the callback
     */
    void (*pathChangedCallback)(const omni::fabric::PathC* paths, const size_t numPaths, void* userData);

    /** Opaque pointer handed back to the callback on every invocation */
    void* userData;

    /**
     * @brief Equality operator: two callback objects are equal when both the
     *        function pointer and the user-data pointer match.
     *
     * @param rhs Callback object to compare against
     * @return true iff this object equals @p rhs
     */
    bool operator==(const PathChangedCallback& rhs) const
    {
        const bool sameFunction = (pathChangedCallback == rhs.pathChangedCallback);
        const bool sameUserData = (userData == rhs.userData);
        return sameFunction && sameUserData;
    }

    /**
     * @brief Inequality operator, the negation of equality
     *
     * @param rhs Callback object to compare against
     * @return true iff this object differs from @p rhs
     */
    bool operator!=(const PathChangedCallback& rhs) const
    {
        return !(rhs == *this);
    }
};
namespace ogn
{
class OmniGraphDatabase;
}
/**
 * A callback that allocates, initializes, and returns an OGN database for a given node.
 *
 * Parameters are the graph context, the node, and a size_t whose meaning is not shown
 * here (presumably an instance count or index - confirm against callers).
 */
using CreateDbFunc = ogn::OmniGraphDatabase* (*)(GraphContextObj const*, NodeObj const*, size_t);
/**
 * Parameters for IGraph::CreateGraphAsNode
 */
struct CreateGraphAsNodeOptions
{
    static const uint8_t kCurrentVersion = 1; //!< Version number of this structure
    //! The version of this structure (defaults to kCurrentVersion)
    const uint8_t version{ CreateGraphAsNodeOptions::kCurrentVersion };
    //! The name of the node that wraps the graph
    const char* nodeName{ "" };
    //! The path to where the graph that the node will wrap will be added
    const char* graphPath{ "" };
    //! The evaluator to use for the new graph
    const char* evaluatorName{ "" };
    //! Whether this graph is a top level global graph
    bool isGlobalGraph{ true };
    //! Whether to back this graph by USD
    bool backByUSD{ true };
    //! What kind of Fabric cache backs this graph (see GraphBackingType)
    GraphBackingType backingType{ kGraphBackingType_FabricShared };
    //! What pipeline stage this graph occupies (see GraphPipelineStage)
    GraphPipelineStage pipelineStage{ kGraphPipelineStage_Simulation };
    //! The evaluation mode for the graph (see GraphEvaluationMode)
    GraphEvaluationMode evaluationMode{ GraphEvaluationMode::Automatic };
};
/**
 * @brief Possible values to be set for Attributes of type "execution"
 * @private Deprecated. See omni::graph::action::IActionGraph.
 */
enum ExecutionAttributeState : uint32_t
{
    kExecutionAttributeStateDisabled, //!< Output attribute connection is disabled
    kExecutionAttributeStateEnabled, //!< Output attribute connection is enabled
    //! Output attribute connection is enabled and the node is pushed to the evaluation stack
    kExecutionAttributeStateEnabledAndPush,
    //! Push this node as a latent event for the current entry point
    kExecutionAttributeStateLatentPush,
    //! Output attribute connection is enabled and the latent state is finished for this node
    kExecutionAttributeStateLatentFinish
};
/**
 * The attribute name prefix used for attributes which hold the concrete value of resolved extended attributes.
 */
#define RESOLVED_ATTRIBUTE_PREFIX "__resolved_"
/**
 * The path token used to identify the targeted prim when graph instancing is used. At runtime this token will be
 * replaced with the absolute path to the targeted prim.
 */
static constexpr char kInstancingGraphTargetPath[] = "_OMNI_GRAPH_TARGET";
/**
 * The value of an uninitialized attribute count for a type (e.g. tuple_count, array_depth)
 */
constexpr uint8_t kUninitializedTypeCount = UINT8_MAX;
/**
 * Value representing an instance
 */
struct InstanceIndex
{
    // NOTE: the comparison operators below previously declared a "bool const" return
    // type; the top-level const on a by-value return is meaningless (flagged by
    // clang-tidy readability-const-return-type), so they now return plain bool.
    // This is a declaration cleanup only - all call sites behave identically.

    /** Returns true iff this InstanceIndex is equal to the @p other */
    bool operator==(InstanceIndex const& other) const
    { return index == other.index; }
    /** Returns true iff this InstanceIndex is not equal to the @p other */
    bool operator!=(InstanceIndex const& other) const
    { return index != other.index; }
    /** Returns true iff this InstanceIndex is less than the @p other */
    bool operator<(InstanceIndex const& other) const
    { return index < other.index; }
    /** Returns true iff this InstanceIndex is less than or equal to the @p other */
    bool operator<=(InstanceIndex const& other) const
    { return index <= other.index; }
    /** Returns true iff this InstanceIndex is greater than the @p other */
    bool operator>(InstanceIndex const& other) const
    { return index > other.index; }
    /** Returns true iff this InstanceIndex is greater than or equal to the @p other */
    bool operator>=(InstanceIndex const& other) const
    { return index >= other.index; }
    /** Returns the sum of this instance index and that of the @p other */
    InstanceIndex operator+(InstanceIndex other) const
    { return { index + other.index }; }
    /** Returns the sum of this instance index and @p idx */
    InstanceIndex operator+(size_t idx) const
    { return { index + idx }; }
    /** Increments the instance index by the index amount contained in @p other */
    InstanceIndex& operator+=(InstanceIndex other)
    { index += other.index; return *this; }
    /** Pre-increments the index value and returns this object */
    InstanceIndex& operator++()
    { index++; return *this; }

    size_t index{ 0 }; //!< Index value for the instance
};
/**
 * Sentinel InstanceIndex values with special meanings (stored as wrapped-around size_t values)
 */
static constexpr InstanceIndex kAuthoringGraphIndex{ size_t(-1) }; //!< Special index for the authoring graph original
static constexpr InstanceIndex kAccordingToContextIndex{ size_t(-2) }; //!< Special index for a context
static constexpr InstanceIndex kInvalidInstanceIndex{ size_t(-3) }; //!< Special index indicating an invalid instance
// ======================================================================
/** Interface to provide functionality to access and modify properties of an OmniGraph attribute.
*/
struct IAttribute
{
//! @private to avoid doxygen problems
CARB_PLUGIN_INTERFACE("omni::graph::core::IAttribute", 1, 12);
/**
* Returns the name of the attribute
*
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
* @return The name of the attribute
*/
const char*(CARB_ABI* getName)(const AttributeObj& attr);
/**
* Returns the type name of the attribute
*
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
* @return The type name of the attribute
*/
const char*(CARB_ABI* getTypeName)(const AttributeObj& attr);
/**
* Returns the extended type, if any, of the attribute. Extended types are things like "union" and
* "any" types that aren't in the explicit list of types in USD. kExtendedAttributeType_Regular
* means that the attribute is not one of these extended types.
*
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
* @return The extended type of the attribute
*/
ExtendedAttributeType(CARB_ABI* getExtendedType)(const AttributeObj& attr);
/**
* Returns the resolved type an extended type like union actually turns out to be, by
* inferring it from the connection. If the type is still not resolved, the BaseDataType of
* the returned type will have eNone as its value.
*
* If the attribute type is just a Regular one then this method will return its permanent type.
*
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
* @return The resolved type of the attribute, based on the connection
*/
Type(CARB_ABI* getResolvedType)(const AttributeObj& attr);
/**
* Returns whether the attribute is an array
*
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
* @return Whether or not the attribute is an array
*/
bool(CARB_ABI* isArray)(const AttributeObj& attr);
/**
* Connects an attribute using a relationship to some other prim. This could be for a bundle connection
* or a pure relationship to a prim. In the case of a pure relationship to a prim, some meta-data will
* be added to mark the relationship as not being used for a bundle
*
* @param[in] attrObj The attr that represents the relationship
* @param[in] pathToPrim The path to the prim to connect to
* @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
* @param[in] isBundleConnection Whether the connection is to be used for bundles or just a pure relationship to a prim
* @return true if connection is successful, false otherwise
*/
bool(CARB_ABI* connectPrim)(const AttributeObj& attrObj, const char* pathToPrim, bool modifyInUsd, bool isBundleConnection);
/**
* Disconnects an attribute using a relationship to some other prim. This could be for a bundle connection
* or a pure relationship to a prim.
*
* @param[in] attrObj The attr that represents the relationship
* @param[in] pathToPrim The path to the prim to disconnect
* @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
* @param[in] isBundleConnection Whether the connection is to be used for bundles or just a pure relationship to a prim
* @return true if disconnection is successful, false otherwise
*/
bool(CARB_ABI* disconnectPrim)(const AttributeObj& attrObj, const char* pathToPrim, bool modifyInUsd, bool isBundleConnection);
/**
* Connects two attributes together to add an edge to the graph. This is a legacy version of the
* connection API. Calling this is equivalent to setting kConnectionType_Regular type connections.
* Please use connectAttrsEx to have fuller control over the kind of connections created.
*
* @param[in] srcAttr The attr that is the source of the directed connection
* @param[in] destAttr The attr that is the destination of the directed connection
* @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
* @return true if connection is successful, false otherwise
*/
bool(CARB_ABI* connectAttrs)(const AttributeObj& srcAttr, const AttributeObj& destAttr, bool modifyInUsd);
/**
* Connects two attributes together to add an edge to the graph. This is an extended version with
* more information about the connection, such as the type of connection.
*
* @param[in] srcAttr The attr that is the source of the directed connection
* @param[in] destAttrInfo A ConnectionInfo struct describing the connection
* @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
* @return true if connection is successful, false otherwise
*/
bool(CARB_ABI* connectAttrsEx)(const AttributeObj& srcAttr, const ConnectionInfo& destAttr, bool modifyInUsd);
/**
* Disconnects two attributes that are connected
*
* @param[in] srcAttr The attribute that is the source of the directed connection
* @param[in] destAttr The attribute that is the destination of the directed connection
* @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
* @return true if connection is successfully broken, false otherwise (if no connections existed)
*/
bool(CARB_ABI* disconnectAttrs)(const AttributeObj& srcAttr, const AttributeObj& destAttr, bool modifyInUsd);
/**
* Queries whether two attributes are connected
*
* @param[in] srcAttr The attribute that is the source of the directed connection
* @param[in] destAttr The attribute that is the destination of the directed connection
* @return true if the two attributes are connected, false otherwise
*/
bool(CARB_ABI* areAttrsConnected)(const AttributeObj& srcAttr, const AttributeObj& destAttr);
/**
* Queries whether two attributes are connection compatible
*
* @param[in] srcAttr The attribute that would be the source of the directed connection
* @param[in] destAttr The attribute that would be the destination of the directed connection
* @return true if the two attributes are compatible, false otherwise
*/
bool(CARB_ABI* areAttrsCompatible)(const AttributeObj& srcAttr, const AttributeObj& destAttr);
/**
* Retrieves the number of upstream connections to the attribute of a node
*
* @param[in] attrObj The attribute object for which to retrieve the connection count
* @return The number of upstream connections to that attribute
*/
size_t(CARB_ABI* getUpstreamConnectionCount)(const AttributeObj& attrObj);
/**
* Retrieves the upstream connections of the attribute of a node
*
* @param[in] attrObj The attribute object for which to retrieve the connections
* @param[out] attrsBuf Buffer to hold the return AttributeObj
* @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
* @return true on success, false on failure
*/
bool(CARB_ABI* getUpstreamConnections)(const AttributeObj& attrObj, AttributeObj* attrsBuf, size_t bufferSize);
/**
* Retrieves the detailed upstream connection info of an attribute. Includes information like type of
* connections.
*
* @param[in] attrObj The attribute object for which to retrieve the connections
* @param[out] connectionInfoBif Buffer to hold the return ConnectionInfo
* @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
* @return true on success, false on failure
*/
bool(CARB_ABI* getUpstreamConnectionsInfo)(const AttributeObj& attrObj,
ConnectionInfo* connectionInfoBuf,
size_t bufferSize);
/**
* Retrieves the number of downstream connections to the attribute of a node
*
* @param[in] attrObj The attribute object for which to retrieve the connection count
* @return The number of downstream connections to that attribute
*/
size_t(CARB_ABI* getDownstreamConnectionCount)(const AttributeObj& attrObj);
/**
* Retrieves the down connections of the attribute of a node
*
* @param[in] attrObj The attribute object for which to retrieve the connections
* @param[out] attrsBuf Buffer to hold the return AttributeObj
* @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
* @return true on success, false on failure
*/
bool(CARB_ABI* getDownstreamConnections)(const AttributeObj& attrObj, AttributeObj* attrsBuf, size_t bufferSize);
/**
* Retrieves the detailed upstream connection info of an attribute. Includes information like type of
* connections.
*
* @param[in] attrObj The attribute object for which to retrieve the connections
* @param[out] connectionInfoBif Buffer to hold the return ConnectionInfo
* @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
* @return true on success, false on failure
*/
bool(CARB_ABI* getDownstreamConnectionsInfo)(const AttributeObj& attrObj,
ConnectionInfo* connectionInfoBuf,
size_t bufferSize);
/**
* Retrieves the node associated with this attribute
*
* @param[in] attrObj The attribute object for which to retrieve the node
* @return The NodeObj representing the node. In case of failure, the node handle
* will be kInvalidNodeHandle
*/
NodeObj(CARB_ABI* getNode)(const AttributeObj& attrObj);
/**
* Ensures the attribute's value is updated, before reading it. For push graphs this does nothing, as the
* push graph is always evaluating and considered up to date.
* For pull graphs, this generates the true "pull" on the attribute, that will cause whatever is upstream
* and is dirty to evaluate.
*
* @param[in] attrObj The attribute object for which to update the value for
* @param[in] updateImmediately Whether to cause the graph to update immediately (synchronously)
* @return Whether the update was successful
*/
bool(CARB_ABI* updateAttributeValue)(const AttributeObj& attrObj, bool updateImmediately);
/** @private Deprecated - do not use */
AttributeDataHandle(CARB_ABI* deprecated_0)(const AttributeObj&);
/** @private Deprecated - do not use */
ConstAttributeDataHandle(CARB_ABI* deprecated_1)(const AttributeObj&);
/**
* Registers a callback to be invoked when the value of the current attribute changes
*
* An attribute only permits a single callback and when called, the previously set callback
* is replaced. Passing nullptr as the callback will remove any existing callback.
*
* @param[in] thisAttribute Reference to the AttributeObj struct representing the current attribute object
* @param[in] onValueChanged The callback to trigger. Parameters are the attribute involved, and the new value
* @param[in] triggerOnConnected Whether to trigger the callback on connected attributes.
*/
void(CARB_ABI* registerValueChangedCallback)(const AttributeObj& attrObj,
void (*onValueChanged)(const AttributeObj& attr, const void* value),
bool triggerOnConnected);
/**
* Returns the set of all metadata on this attribute.
*
* The keyBuf and valueBuf arrays preallocated by the caller, and contain at least "getMetadataCount()"
* entries in them.
* All returned strings are owned by the node type and not to be destroyed.
* The returned keyBuf and valueBuf must have exactly the same size with corresponding index values; that is
* keyBuf[i] is the metadata name for the string in valueBuf[i].
*
* @param[in] thisAttribute Reference to the AttributeObj struct representing the current attribute object
* @param[out] keyBuf Buffer in which to put the list of metadata keys
* @param[out] valueBuf Buffer in which to put the list of metadata values
* @param[in] bufferSize the number of strings each of the two buffers is able to hold
* @return Number of metadata items successfully populated
*/
size_t(CARB_ABI* getAllMetadata)(const AttributeObj& thisAttribute,
const char** keyBuf,
const char** valueBuf,
size_t bufferSize);
/**
* Retrieves a metadata value from this attribute
*
* @param[in] thisAttribute Reference to the AttributeObj struct representing the current attribute object
* @param[in] key The name of the metadata to be retrieved
* @return The value of the metadata, or nullptr if the named metadata was not set on this attribute
*/
const char*(CARB_ABI* getMetadata)(const AttributeObj& thisAttribute, const char* key);
/**
* Returns the number of metadata entries on this attribute
*
* @param[in] thisAttribute Reference to the AttributeObj struct representing the current attribute object
* @return the number of metadata key/value pairs on this attribute
*/
size_t(CARB_ABI* getMetadataCount)(const AttributeObj& thisAttribute);
/**
* Sets a metadata value on this attribute.
*
* Certain metadata keywords have special meaning internally:
* uiName: The name of the attribute in a longer, human-readable format
*
* Note: The main way for metadata to be set is through the .ogn format files. If you call this directly the
* metadata will not persist across sessions. If you wish to define metadata outside of the .ogn file
* the best method is to override the initializeType() method in your attribute definition and set it there.
*
* @param[in] attrObj Reference to the AttributeObj struct representing the current attribute object
* @param[in] key The keyword, used as the name of the metadata
* @param[in] value The value of the metadata. Metadata can be parsed later if non-string values are desired.
*/
void(CARB_ABI* setMetadata)(const AttributeObj& attrObj, const char* key, const char* value);
/**
* Where we have dynamic scheduling, downstream nodes can have their execution disabled by turning on the flag
* in the upstream attribute. Note you also have to call setDynamicDownstreamControl on the node to enable
* this feature. See setDynamicDownstreamControl on INode for further information.
*
* @param[in] attrObj Reference to the AttributeObj struct representing the current attribute object
* @return Whether downstream nodes connected to this attribute should be disabled from further work
*/
bool (CARB_ABI* getDisableDynamicDownstreamWork)(const AttributeObj& attrObj);
/**
* Where we have dynamic scheduling, downstream nodes can have their execution disabled by turning on the flag
* in the upstream attribute. Note you also have to call setDynamicDownstreamControl on the node to enable
* this feature. This function allows you to set the flag on the attribute that will disable the downstream
* node. See setDynamicDownstreamControl on INode for further information.
*
* @param[in] attrObj Reference to the AttributeObj struct representing the current attribute object
* @param[in] value Whether to disable the downstream connected nodes or not.
*/
void (CARB_ABI* setDisableDynamicDownstreamWork)(const AttributeObj& attrObj, bool value);
/**
* Sets the resolved type of an extended type. This should be called by a node from the
* *onConnectionTypeResolve()* callback when it determines that an extended-type attribute
* can be resolved to a specific type. For example a generic 2-input "Add" node could resolve input B and its
* output attribute type to float when input A is connected to a float. Passing @ref omni::fabric::Type() will
* reset the attribute type to "unresolved".
*
* @note This operation is asynchronous because it is considered as part of a whole-graph type resolution
* algorithm. It also may not succeed because there could be constraints in the graph that
* prevent the type from being resolved as requested.
*
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
* @param[in] type The new type of the attribute
*/
void(CARB_ABI* setResolvedType)(const AttributeObj& attr, const Type& type);
/**
* Retrieves the port type (such as input, output, state) associated with this attribute
*
* @param[in] attrObj The attribute object for which to retrieve the connections
* @return the AttributePortType of this attribute
*/
AttributePortType(CARB_ABI* getPortType)(const AttributeObj& attrObj);
/**
* Returns whether the attribute is a dynamic attribute (not in the node definition) or not
*
* @param[in] attrObj The attribute object for which to query
* @return Whether the attribute is a dynamic one
*/
bool(CARB_ABI* isDynamic)(const AttributeObj& attrObj);
/**
* Returns the full path to the attribute, including the node path
*
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
* @return The full path to the attribute
*/
const char*(CARB_ABI* getPath)(const AttributeObj& attrObj);
/**
* @return The string representing the extended union types, nullptr if the attribute is not a union type
*/
const char*(CARB_ABI* getUnionTypes)(const AttributeObj& attribute);
/**
* Returns whether the attribute is still valid or not
*
* @param[in] attrObj The attribute object for which to query
* @return Whether the attribute is still valid
*/
bool(CARB_ABI* isValid)(const AttributeObj& attrObj);
/**
* Return the attribute name with the port type prepended if it isn't already present.
*
* @param[in] name The attribute name, with or without the port prefix
* @param[in] portType The port type of the attribute
* @param[in] isBundle true if the attribute name is to be used in a bundle. Note that colon is an illegal character
* in bundled attributes so an underscore is used instead.
* @return The name with the proper prefix for the given port type
*/
NameToken(CARB_ABI* ensurePortTypeInName)(NameToken name, AttributePortType portType, bool isBundle);
/**
     * Parse the port type from the given attribute name if present. The port type is indicated by a prefix separated by
* a colon or underscore in the case of bundled attributes.
*
* @param[in] name The attribute name
* @return The port type indicated by the attribute prefix if present. AttributePortType::kAttributePortType_Unknown
* if there is no recognized prefix.
*/
AttributePortType(CARB_ABI* getPortTypeFromName)(NameToken name);
/**
* Return the attribute name with the port type removed if it is present. For example "inputs:attr" becomes "attr"
*
* @param[in] name The attribute name, with or without the port prefix
* @param[in] isBundle true if the attribute name is to be used in a bundle. Note that colon is an illegal character
* in bundled attributes so an underscore is used instead.
* @return The name with the port type prefix removed
*/
NameToken(CARB_ABI* removePortTypeFromName)(NameToken name, bool isBundle);
/**
* Get the optional compute flag from the attribute. When true this flag indicates that the attribute does not
* need to be valid in order for the compute() function to be called.
* Note that "valid" does not necessarily mean the attribute data is up to date, it only means that everything
* required to locate the attribute data is available and valid (otherwise outputs would always be invalid).
*
* @param[in] attrObj The attribute object being queried
* @return True if the attribute is optional for compute
*/
bool(CARB_ABI* getIsOptionalForCompute)(const AttributeObj& attrObj);
/**
* Set whether the attribute is optional for compute or not; mostly used by generated code.
* This flag would be set on attributes that the compute() method may not look at. It would then be up to the
* compute() method to check validity if it ends up requiring the attribute's value. You might use this when an
* attribute value is not used in all compute paths, like a "choice" node that selects exactly one of its inputs
* to send to the output - only the selected input would need to be valid for compute to succeed.
*
* @param[in] attrObj The attribute object being modified
* @param[in] isOptional New value for the optional flag on the attribute
*/
void(CARB_ABI* setIsOptionalForCompute)(const AttributeObj& attrObj, bool isOptional);
/**
* Returns an AttributeDataHandle to access the default data on this input attribute.
*
* @param[in] attrObj The input attribute object for which to update the default value for
* @return The AttributeDataHandle associated with the default value of this attribute, to mutate data in the FC
*/
AttributeDataHandle(CARB_ABI* getDefaultValueAttributeDataHandle)(const AttributeObj& attrObj);
/**
* Get the deprecated flag from the attribute. When true this flag indicates that the attribute has been
* deprecated and will be removed in a future version of the node.
*
* @param[in] attrObj The attribute object being queried
* @return True if the attribute is deprecated
*/
bool(CARB_ABI* isDeprecated)(const AttributeObj& attrObj);
/**
* Return the deprecation message for an attribute.
*
* @param[in] attributeObj Attribute to which this function applies
* @return String containing the attribute deprecation message (nullptr if the attribute is not deprecated)
*/
char const*(CARB_ABI* deprecationMessage)(const AttributeObj& attributeObj);
/**
* Returns an AttributeDataHandle to access the data on this attribute.
*
* @param[in] attrObj The attribute object for which to retrieve the data accessor
* @param[in] instanceIdx The instance index relative to the current active instance for which you want to retrieve the data.
* @return The AttributeDataHandle associated with this attribute, to mutate data in the FC
*/
AttributeDataHandle(CARB_ABI* getAttributeDataHandle)(const AttributeObj& attrObj, InstanceIndex instanceIdx);
/**
* Returns a ConstAttributeDataHandle to access the data on this attribute.
*
* @param[in] attrObj The attribute object for which to retrieve the data accessor
* @param[in] instanceIdx The instance index relative to the current active instance for which you want to retrieve the data.
* @return The ConstAttributeDataHandle associated with this attribute, to read data in the FC
*/
ConstAttributeDataHandle(CARB_ABI* getConstAttributeDataHandle)(const AttributeObj& attrObj, InstanceIndex instanceIdx);
/**
* Returns whether or not this attribute is a runtime constant or not.
* Runtime constant will keep the same value every frame, for every instances
* This property can be taken advantage in vectorized compute
*
* @param[in] attrObj The attribute object to query
*
* @return true if the attribute is a runtime constant, false otherwise
*/
bool(CARB_ABI* isRuntimeConstant)(const AttributeObj& attrObj);
/**
* Warn the framework that writing to the provided attributes is done, so it can trigger callbacks attached to them
*
* @param[in] attrObjs A pointer to an array of attribute objects for which to call change callbacks
* @param[in] attrObjCount The number of object(s) in that array
*/
void(CARB_ABI* writeComplete)(const AttributeObj* attrObjs, size_t attrObjCount);
/**
* Returns the name of the attribute as a token
*
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
* @return The name of the attribute as a token
*/
NameToken(CARB_ABI* getNameToken)(const AttributeObj& attr);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle.
// The named member must remain the last entry in the struct and the count must match the total number of entries.
STRUCT_INTEGRITY_CHECK(IAttribute, getNameToken, 47)
// ======================================================================
/** Each node type in a plugin must implement this interface */
struct INodeType
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::INodeType", 1, 12);
    /**
     * Return node type name - this is used by 'node:type' schema in Node prim.
     * For retrieving the node type name of a known NodeTypeObj use getTypeName().
     *
     * @return The node type name
     */
    const char*(CARB_ABI* getNodeType)();
    /**
     * Implementation of compute, see above GraphContext for functions to pull/push data with other nodes
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return true if compute is successful, false otherwise
     */
    bool(CARB_ABI* compute)(const GraphContextObj& context, const NodeObj& node);
    // functions below are optional
    /**
     * This allows each node to define custom data for each node instance
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] node Reference to the NodeObj struct representing the node object pointer
     */
    void(CARB_ABI* initialize)(const GraphContextObj& context, const NodeObj& node);
    /**
     * Release memory created by initialize function above
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * pointer
     */
    void(CARB_ABI* release)(const NodeObj& node);
    /**
     * This allows each node to be upgraded/downgraded for each node instance
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] oldVersion int specifying the old version (of the node)
     * @param[in] newVersion int specifying the new version (of the node type)
     * @return true if the node was modified, false otherwise
     */
    bool(CARB_ABI* updateNodeVersion)(const GraphContextObj& context,
                                      const NodeObj& node,
                                      int oldVersion,
                                      int newVersion);
    /**
     * This allows each node type to specify its inputs and outputs in order to build up a description
     * of the node type. This is done by calling the provided implementations of addInput and addOutput to
     * add the requisite inputs and outputs respectively.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     */
    void(CARB_ABI* initializeType)(const NodeTypeObj& nodeType);
    /**
     * Adds an input for the node type. This is intended to be called from initializeType when specifying
     * the inputs for the node type.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] name Name of the input
     * @param[in] typeName Typename of the input
     * @param[in] required Whether or not the input is required
     * @param[in] defaultValuePtr Pointer to the location containing the default value of the input
     * @param[in] defaultElemCountPtr Number of elements in the default value -- nullptr if the input is a scalar
     */
    void(CARB_ABI* addInput)(const NodeTypeObj& nodeType,
                             const char* name,
                             const char* typeName,
                             bool required,
                             const void* defaultValuePtr,
                             const size_t* defaultElemCountPtr);
    /**
     * Adds an output for the node type. This is intended to be called from initializeType when specifying
     * the outputs for the node type.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] name Name of the output
     * @param[in] typeName Typename of the output
     * @param[in] required Whether or not the output is required
     * @param[in] defaultValuePtr Pointer to the location containing the default value of the output
     * @param[in] defaultElemCountPtr Number of elements in the default value -- nullptr if the output is a scalar
     */
    void(CARB_ABI* addOutput)(const NodeTypeObj& nodeType,
                              const char* name,
                              const char* typeName,
                              bool required,
                              const void* defaultValuePtr,
                              const size_t* defaultElemCountPtr);
    /**
     * Adds a state attribute for the node type. This is intended to be called from initializeType when specifying
     * the state information for the node type. State attributes differ from inputs and outputs in that they will
     * never leave the node (i.e. they cannot be connected). The node is responsible for ensuring that their contents
     * are consistent with the current evaluation.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] name Name of the state attribute
     * @param[in] typeName Typename of the state attribute
     * @param[in] required Whether or not the state attribute is required
     * @param[in] defaultValuePtr Pointer to the location containing the default value of the state attribute
     * @param[in] defaultElemCountPtr Number of elements in the default value -- nullptr if the state attribute is a scalar
     */
    void(CARB_ABI* addState)(const NodeTypeObj& nodeType,
                             const char* name,
                             const char* typeName,
                             bool required,
                             const void* defaultValuePtr,
                             const size_t* defaultElemCountPtr);
    /**
     * Adds an extended input (not one of the usual data types) for the node type, for example, a union type.
     * This is intended to be called from initializeType when specifying the inputs for the node type.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] name Name of the input
     * @param[in] typeInfo A buffer holding extra information about the type
     * @param[in] required Whether or not the input is required
     * @param[in] extendedAttrType Whether the attribute is an extended type, like a union or any
     */
    void(CARB_ABI* addExtendedInput)(const NodeTypeObj& nodeType,
                                     const char* name,
                                     const char* typeInfo,
                                     bool required,
                                     ExtendedAttributeType extendedAttrType);
    /**
     * Adds an extended output (not one of the usual data types) for the node type, for example, a union type.
     * This is intended to be called from initializeType when specifying the outputs for the node type.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] name Name of the output
     * @param[in] typeInfo A buffer holding extra information about the type
     * @param[in] required Whether or not the output is required
     * @param[in] extendedAttrType Whether the attribute is an extended type, like a union or any
     */
    void(CARB_ABI* addExtendedOutput)(const NodeTypeObj& nodeType,
                                      const char* name,
                                      const char* typeInfo,
                                      bool required,
                                      ExtendedAttributeType extendedAttrType);
    /**
     * Adds an extended state (not one of the usual data types) for the node type, for example, a union type.
     * This is intended to be called from initializeType when specifying the state attributes for the node type.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] name Name of the state attribute
     * @param[in] typeInfo A buffer holding extra information about the type
     * @param[in] required Whether or not the state attribute is required
     * @param[in] extendedAttrType Whether the attribute is an extended type, like a union or any
     */
    void(CARB_ABI* addExtendedState)(const NodeTypeObj& nodeType,
                                     const char* name,
                                     const char* typeInfo,
                                     bool required,
                                     ExtendedAttributeType extendedAttrType);
    /**
     * Return whether state information exists on the node. This is mainly used for determining how nodes can be
     * safely scheduled for execution.
     *
     * Internal state information is data maintained on the node that is unique to the node's evaluation instance.
     * As this is somewhat equivalent to member data it cannot be accessed in parallel on the same node. For example,
     * a node with internal state data cannot be scheduled as part of a parallel graph loop.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return True if this node type manages its own internal state information
     */
    bool(CARB_ABI* hasState)(const NodeTypeObj& nodeType);
    /**
     * Sets a flag that indicates state information exists on the node. This is mainly used for determining how
     * nodes can be safely scheduled for execution.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] nodeHasState New value for the flag indicating if the node has state information
     */
    void(CARB_ABI* setHasState)(const NodeTypeObj& nodeType, bool nodeHasState);
    /**
     * Returns the path to this node type object, so that nodes could be created under this path in the case
     * of compound nodes
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return path to the nodeTypeObj
     */
    const char*(CARB_ABI* getPath)(const NodeTypeObj& nodeType);
    /**
     * Registers task functions that the node intends to schedule via Realm. This is called once for each node type
     * during the lifetime of the process after the Realm runtime has been initialized.
     *
     */
    void(CARB_ABI* registerTasks)();
    /**
     * Returns the set of all metadata on this node.
     *
     * The keyBuf and valueBuf arrays are preallocated by the caller, and contain at least "getMetadataCount()"
     * entries in them.
     * All returned strings are owned by the node type and not to be destroyed.
     * The returned keyBuf and valueBuf must have exactly the same size with corresponding index values; that is
     * keyBuf[i] is the metadata name for the string in valueBuf[i].
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[out] keyBuf Buffer in which to put the list of metadata keys
     * @param[out] valueBuf Buffer in which to put the list of metadata values
     * @param[in] bufferSize the number of strings each of the two buffers is able to hold
     * @return Number of metadata items successfully populated
     */
    size_t(CARB_ABI* getAllMetadata)(const NodeTypeObj& nodeType,
                                     const char** keyBuf,
                                     const char** valueBuf,
                                     size_t bufferSize);
    /**
     * Retrieves a metadata value from this node type
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[in] key The name of the metadata to be retrieved
     * @return The value of the metadata, or nullptr if the named metadata was not set on this node type
     */
    const char*(CARB_ABI* getMetadata)(const NodeTypeObj& nodeType, const char* key);
    /**
     * Returns the number of metadata entries on this node
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @return the number of metadata key/value pairs on this node
     */
    size_t(CARB_ABI* getMetadataCount)(const NodeTypeObj& nodeType);
    /**
     * Sets a metadata value on this node type.
     *
     * Certain metadata keywords have special meaning internally:
     *     _extension_: The name of the extension from which the node type was loaded
     *     uiName: The name of the node type in a longer, human-readable format
     *
     * Note: The main way for metadata to be set is through the .ogn format files. If you call this directly the
     *       metadata will not persist across sessions. If you wish to define metadata outside of the .ogn file
     *       the best method is to override the initializeType() method in your node definition and set it there.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[in] key The keyword, used as the name of the metadata
     * @param[in] value The value of the metadata. Metadata can be parsed later if non-string values are desired.
     */
    void(CARB_ABI* setMetadata)(const NodeTypeObj& nodeType, const char* key, const char* value);
    /**
     * Adds a sub-nodetype to an existing node type. This is used for Python and Compounds, where there
     * is a global type that houses all the type information for all the nodes registered in the system.
     * Each of those specific node types is a sub-nodetype to the bigger container node type
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[in] subNodeTypeName Name of the sub-nodeType
     * @param[in] subNodeType Reference to the NodeTypeObj struct representing the sub-nodetype object
     */
    void(CARB_ABI* addSubNodeType)(const NodeTypeObj& nodeType,
                                   const char* subNodeTypeName,
                                   const NodeTypeObj& subNodeType);
    /**
     * Retrieves a sub-nodetype to an existing node type. This is mainly used for Python nodes, where there is a
     * global PythonNode type that houses all the type information for all the python nodes registered in the
     * system. Each of those specific python node types is a sub-nodetype to the bigger python node type
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[in] subNodeTypeName Name of the sub-nodeType
     * @return The NodeTypeObj of sub-nodetype
     */
    NodeTypeObj(CARB_ABI* getSubNodeType)(const NodeTypeObj& nodeType, const char* subNodeTypeName);
    /**
     * Creates a new nodeType, but without the interface portion. It only contains the "handle" portion
     * of the NodeTypeObj - that is, a handle to an underlying object that can store the inputs/outputs of
     * a node type. This is currently mainly used for Python types, where the functions are stored not in
     * INodeType struct as other types, but elsewhere.
     *
     * @param[in] nodeTypeName the name of the new node type to be created.
     * @param[in] version the version of the new node type to be created.
     * @return The NodeTypeObj created without the INodeType portion.
     */
    NodeTypeObj(CARB_ABI* createNodeType)(const char* nodeTypeName, int version);
    /**
     * Gets the number of scheduled instances for the node type object so that an internal buffer of the appropriate
     * size can be created. The number of scheduled instances should match the number created in getScheduleNode.
     *
     * This function is called as the graph is evaluated and a list of upstream nodes is provided such that the
     * number of scheduled instances for the node can be dependent on the results of upstream evaluations.
     *
     * Currently, we only support either zero or one scheduled instances.
     *
     * Deprecated: Use action graph for conditional scheduling
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] upstreamScheduleNodesBuf pointer to an array of upstream schedule nodes
     * @param[in] upstreamBufferSize size of the upstream schedule node buffer
     * @return The number of scheduled instances expected for the node
     */
    size_t(CARB_ABI* getScheduleNodeCount)(const GraphContextObj& context,
                                           const NodeObj& node,
                                           const ScheduleNodeObj* upstreamScheduleNodesBuf,
                                           size_t upstreamBufferSize);
    /**
     * Gets/Creates the scheduled instances for the node type object. The number of scheduled instances should match the
     * return value of getScheduleNodeCount.
     *
     * This function is called as the graph is evaluated and a list of upstream nodes is provided such that the
     * scheduled instances for the node can be dependent on the results of upstream evaluations.
     *
     * Currently, we only support either zero or one scheduled instances.
     *
     * Deprecated: Use action graph for conditional scheduling
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] upstreamScheduleNodesBuf pointer to an array of upstream schedule nodes
     * @param[in] upstreamBufferSize size of the upstream schedule node buffer
     * @param[out] scheduleNodesBuf pointer to an output array of schedule nodes for this node
     * @param[in] bufferSize size of scheduleNodesBuf array
     */
    void(CARB_ABI* getScheduleNodes)(const GraphContextObj& context,
                                     const NodeObj& node,
                                     const ScheduleNodeObj* upstreamScheduleNodesBuf,
                                     size_t upstreamBufferSize,
                                     ScheduleNodeObj* scheduleNodesBuf,
                                     size_t bufferSize);
    /**
     * This function is called when an extended type attribute has been resolved due to a connection change. The node
     * can then choose to call IAttribute::setResolvedType() on extended type attributes according to its
     * internal logic.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     */
    void(CARB_ABI* onConnectionTypeResolve)(const NodeObj& node);
    /**
     * Return whether this node type is a singleton (only 1 instance allowed per graph instance, and its subgraphs)
     *
     * A node type can be made to be singleton, in which case only 1 instance of that type of Node will be allowed
     * per graph.
     *
     * @return True if this node type is a singleton
     */
    bool(CARB_ABI* isSingleton)(const NodeTypeObj& nodeType);
    /**
     * Runs the inspector on the data in the given node type.
     *
     * @param[in] nodeTypeObj The node type on which the inspector runs
     * @param[in] inspector The inspector class
     * @return true if the inspection ran successfully, false if the inspection type is not supported
     */
    bool(CARB_ABI* inspect)(const NodeTypeObj& nodeTypeObj, inspect::IInspector* inspector);
    /**
     * Returns the number of subnode types on this node type.
     *
     * @param[in] nodeType Reference to node type object for which subnode types are to be found
     * @return the number of subnode types owned by this node type
     */
    size_t(CARB_ABI* getSubNodeTypeCount)(const NodeTypeObj& nodeType) ;
    /**
     * Returns the set of all subnode types of this node type.
     *
     * The subNodeTypeBuf array must be preallocated by the caller, and contain at least "bufferSize" entries.
     * The contents of both buffers are owned by the interface and should not be freed by the caller.
     *
     * @param[in] nodeType Reference to node type object for which subnode types are to be found
     * @param[out] subNodeTypeNameBuf Buffer in which to put the list of subnode type names
     * @param[out] subNodeTypeBuf Buffer in which to put the list of subnode type definitions
     * @param[in] bufferSize the number of strings each of the two buffers is able to hold
     * @return Number of subnode types successfully populated
     */
    size_t(CARB_ABI* getAllSubNodeTypes)(const NodeTypeObj& nodeType,
                                         const char** subNodeTypeNameBuf,
                                         NodeTypeObj* subNodeTypeBuf,
                                         size_t bufferSize);
    /**
     * Removes a sub-nodetype from an existing node type. This is mainly used for Python nodes, where there is a
     * global PythonNode type that houses all the type information for all the python nodes registered in the
     * system. Each of those specific python node types is a sub-nodetype to the bigger python node type. When a
     * Python module is unloaded it should be removing any sub-nodetypes it has added.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] subNodeTypeName Name of the sub-nodeType to be removed
     * @return True if the removal succeeded, false if not (most likely because the sub-nodetype did not exist)
     */
    bool(CARB_ABI* removeSubNodeType)(const NodeTypeObj& nodeType,
                                      const char* subNodeTypeName);
    /**
     * Get the currently defined scheduling hints for this node type.
     * Call the inline function unless you want to manage your own reference counts.
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return Scheduling hints pertaining to scheduling for this node type.
     */
    ISchedulingHints*(CARB_ABI* getSchedulingHints)(const NodeTypeObj& nodeType);
    /**
     * Get a pointer to the currently defined scheduling hints for this node type.
     * This version lets you manage your own reference counts.
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return Scheduling hints pertaining to scheduling for this node type.
     */
    inline omni::core::ObjectPtr<ISchedulingHints> getSchedulingHintsPtr(const NodeTypeObj& nodeType) const
    {
        return omni::core::steal(getSchedulingHints(nodeType));
    }
    /**
     * Set the scheduling hints for this node type.
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] newSchedulingHints Scheduling hints pertaining to scheduling for this node type.
     */
    void(CARB_ABI* setSchedulingHints)(const NodeTypeObj& nodeType, ISchedulingHints* newSchedulingHints);
    /**
     * Returns the name of the node type
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return The name of the node type
     */
    const char*(CARB_ABI* getTypeName)(const NodeTypeObj& nodeType);
    /**
     * Destroys a node type object, including any sub-nodetype objects attached to it.
     *
     * @param[in] nodeType The node type to destroy
     * @return true if the node type is destroyed, false otherwise
     */
    bool(CARB_ABI* destroyNodeType)(const NodeTypeObj& nodeType);
    /**
     * Retrieves a sub-nodetype to an existing node type by its SdfPath. This is mainly used for compound
     * nodes where subnode types that are stored have a backing Prim on the stage.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[in] pathToSubNodeType Stage path to the subnode type
     * @return The NodeTypeObj of sub-nodetype
     */
    NodeTypeObj(CARB_ABI* getSubNodeTypeByPath)(const NodeTypeObj& nodeType, const char* pathToSubNodeType);
    /**
     * Determines if the node type is a compound node type. A compound node type is a node type that references
     * an OmniGraph to define the computation.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @return True if the node type is a compound node, false otherwise
     */
    bool(CARB_ABI* isCompoundNodeType)(const NodeTypeObj& nodeType);
    /**
     * Implementation of computeVectorized, see above GraphContext for functions to pull/push data with other nodes
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] count The number of vectorized instances available for compute
     * @return the number of instances successfully computed
     */
    size_t(CARB_ABI* computeVectorized)(const GraphContextObj& context, const NodeObj& node, size_t count);
    /**
     * Called whenever a graph instance is being removed from the stage.
     * This is an opportunity to release any memory allocated specifically for this instance
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] instanceID The instanceID as would be returned by INode::getGraphInstanceID
     */
    void (CARB_ABI* releaseInstance)(const NodeObj& node, NameToken instanceID);
    /**
     * Called by the framework when an OGN database previously created by the provided callback in INode::getOgnDatabase
     * needs to be destroyed
     *
     * @param[in] node Reference to the NodeObj struct representing the node object previously used to create the DB
     * @param[in] db An OGN database previously created by provided callback in INode::getOgnDatabase
     */
    void(CARB_ABI* destroyDB)(const NodeObj& node, ogn::OmniGraphDatabase* db);
    /**
     * Called by the framework in order to notify an OGN database previously created by the provided callback in
     * INode::getOgnDatabase that a type resolution event has happened on an attribute
     *
     * @param[in] attrib Reference to the AttributeObj struct representing the attribute object that just (un)resolved its type
     * @param[in] db An OGN database previously created by provided callback in INode::getOgnDatabase
     */
    void(CARB_ABI* notifyTypeResolution)(AttributeObj const& attrib, ogn::OmniGraphDatabase* db);
    /**
     * Called by the framework in order to notify an OGN database that some dynamic attributes been added or removed.
     *
     * @param[in] db An OGN database previously created by provided callback in INode::getOgnDatabase
     * @param[in] attrib The attribute that is created or removed
     * @param[in] isAttributeCreated If true, the attribute is newly created, otherwise it is going to be removed
     */
    void(CARB_ABI* notifyDynamicAttributeChanged)(ogn::OmniGraphDatabase* db, AttributeObj const& attrib, bool isAttributeCreated);
    /**
     * Returns the ABI version against which the extension has been built
     */
    carb::Version(CARB_ABI* getCarbABIVersion)();
    //////////////////////////////////////////////////////////////////////////////////////
    // REMINDER REMINDER REMINDER REMINDER REMINDER REMINDER
    //////////////////////////////////////////////////////////////////////////////////////
    // Any change made in this ABI should come with a change in either (or both of):
    //  - OmniGraphNode_ABI::populateNodeTypeInterface()
    //  - NodeTypeRegistration::NodeTypeRegistration()
    };
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(INodeType, getCarbABIVersion, 42)//Check the reminder above
// ======================================================================
/** Interface to a single node in a graph */
struct INode
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::INode", 4, 7);
    /**
     * Returns the number of attributes on this node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return the number of attributes on this node
     */
    size_t(CARB_ABI* getAttributeCount)(const NodeObj& node);
    /**
     * Returns the attributes on this node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[out] attrsBuf Buffer to hold the return attribute objects
     * @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
     * @return true if successful, false otherwise
     */
    bool(CARB_ABI* getAttributes)(const NodeObj& node, AttributeObj* attrsBuf, size_t bufferSize);
    /**
     * Retrieves whether the attribute in question exists or not
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] attrName Name of the attribute on the node
     * @return true if the attribute exists on the node, false if not
     */
    bool(CARB_ABI* getAttributeExists)(const NodeObj& node, const char* attrName);
    /** @private Retired - do not use */
    void (CARB_ABI* retired_1)(NodeObj&);
    /**
     * Retrieves an attribute that points to the attribute on the current node, looked up by name.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] attrName Name of the attribute on the node
     * @return the attribute object requested
     */
    AttributeObj(CARB_ABI* getAttribute)(const NodeObj& node, const char* attrName);
    /**
     * Retrieves an attribute that points to the attribute on the current node, looked up by token.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] attrName Token containing the name of the attribute on the node
     * @return the attribute object requested
     */
    AttributeObj(CARB_ABI* getAttributeByToken)(const NodeObj& node, NameToken attrName);
    /**
     * Retrieves the prim path to the node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return the prim path to the node
     */
    const char*(CARB_ABI* getPrimPath)(const NodeObj& node);
    /**
     * Retrieves the user data set on the node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return the user data pointer previously set with setUserData (or nullptr if none was set)
     */
    void*(CARB_ABI* getUserData)(const NodeObj& node);
    /**
     * Sets the user data on the node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] userData Opaque pointer to be stored on the node; retrievable later via getUserData
     */
    void(CARB_ABI* setUserData)(const NodeObj& node, void* userData);
    /** @private Retired - do not use */
    void (CARB_ABI* retired_2)(NodeObj&);
    /** @private Retired - do not use */
    bool (CARB_ABI* retired_3)(NodeObj&);
    /** @private Retired - do not use */
    void (CARB_ABI* retired_4)(NodeObj&);
    /**
     * Returns the graph that this node belongs to
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return GraphObj structure containing the graph this node belongs to
     */
    GraphObj(CARB_ABI* getGraph)(const NodeObj& node);
    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Deprecated - use getNodeTypeObj")
    INodeType(CARB_ABI* getNodeType)(const NodeObj&);
    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Deprecated - use getNodeTypeObj().getTypeName()")
    const char*(CARB_ABI* getPythonNodeType)(const NodeObj&);
    /**
     * Returns whether the node is disabled
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return true if disabled, false otherwise
     */
    bool(CARB_ABI* isDisabled)(const NodeObj& node);
    /**
     * Sets the disabled state on the node
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] disable Whether to disable the node
     */
    void(CARB_ABI* setDisabled)(const NodeObj& node, bool disable);
    /**
     * Lets the evaluation system know that the compute for this node is complete for this frame but not completed
     * overall. For example, once an animation is triggered, we want the animation to play until completion - on
     * a particular frame, the animation for that frame may be complete, but we're not done until the overall
     * animation is completed. This method is only meaningful for evaluators that implement standard flow graph
     * semantics.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     *
     */
    void(CARB_ABI* setComputeIncomplete)(const NodeObj& node);
    /**
     * Returns whether the node has a USD representation on the stage.
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return true if the node is backed by USD, false otherwise
     */
    bool(CARB_ABI* isBackedByUsd)(const NodeObj& node);
    /**
     * Creates a dynamic attribute on the node.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] name Name of the attribute
     * @param[in] attributeType The Type of the attribute. Extended attributes are Token types.
     * @param[in] value Pointer to the location containing the initial value of the attribute
     * @param[in] elemCount Number of elements in the attribute -- nullptr if the attribute is a scalar
     * @param[in] portType Whether this attribute is an input, output, or state
     * @param[in] extendedAttrType The type of extended attribute to create, if any (see definition of
     *                             ExtendedAttributeType)
     * @param[in] unionTypes In case the extendedAttrType is union, unionTypes is a comma separated
     *                       string that lists the allowable concrete types in the union
     * @return true if the attribute was created, false otherwise
     */
    bool(CARB_ABI* createAttribute)(const NodeObj& node,
                                    const char* name,
                                    Type attributeType,
                                    const void* value,
                                    const size_t* elemCount,
                                    AttributePortType portType,
                                    ExtendedAttributeType extendedAttrType,
                                    const char* unionTypes);
    /**
     * Removes a dynamic attribute from the node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] name Name of the attribute
     * @return true if the attribute was removed, false if the attribute was not found
     */
    bool(CARB_ABI* removeAttribute)(const NodeObj& node, const char* name);
    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Use EF Framework to customize task generation by nodes")
    ScheduleNodeObj(CARB_ABI* createScheduleNode)(const NodeObj& node);
    /**
     * Registers a callback to be invoked when any attribute of the current node is connected
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] connectionCallback The struct containing the callback to trigger, and a piece of user data.
     *            The parameters are the attributes of this and the other node being connected, and a void* of user data
     * @return true for success, false for failure
     */
    bool(CARB_ABI* registerConnectedCallback)(const NodeObj& node,
                                              ConnectionCallback connectionCallback);
    /**
     * Registers a callback to be invoked when any attribute of the current node is disconnected
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] connectionCallback The struct containing the callback to trigger, and a piece of user data.
     *            The parameters are the attributes of this and the other node being disconnected, and a void* of user data
     * @return true for success, false for failure
     */
    bool(CARB_ABI* registerDisconnectedCallback)(const NodeObj& node,
                                                 ConnectionCallback connectionCallback);
    /**
     * Deregisters the callback to be invoked when any attribute of the current node is connected
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] connectionCallback The struct containing the callback to trigger, and a piece of user data.
     */
    void(CARB_ABI* deregisterConnectedCallback)(const NodeObj& node, ConnectionCallback connectionCallback);
    /**
     * Deregisters the callback to be invoked when any attribute of the current node is disconnected
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] connectionCallback The struct containing the callback to trigger, and a piece of user data.
     */
    void(CARB_ABI* deregisterDisconnectedCallback)(const NodeObj& node, ConnectionCallback connectionCallback);
    /**
     * When we are using dynamic scheduling (where the exact amount of work is not known upfront), a node may
     * try to suppress downstream nodes from executing based on runtime execution information. To do this,
     * it needs to turn on dynamic downstream control. This function returns whether the node is participating
     * in this scheme. A good use case for this feature is where we have a lot of prim nodes ticking
     * unnecessarily, consuming cycles. We can turn them off using this feature with information available
     * at compute time.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return Whether this node is setup to participate in dynamic scheduling by dynamically controlling the
     *         scheduling of downstream nodes.
     */
    bool(CARB_ABI* getDynamicDownstreamControl)(const NodeObj& node);
    /**
     * When we are using dynamic scheduling (where the exact amount of work is not known upfront), nodes may
     * try to suppress downstream nodes from executing based on runtime execution information. To do this,
     * it needs to turn on dynamic downstream control. This function sets whether the node is participating
     * in this scheme. You need to call this function on the upstream node that is suppressing downstream
     * activity.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] value Whether to turn the flag on or off on the node
     */
    void(CARB_ABI* setDynamicDownstreamControl)(const NodeObj& node, bool value);
    /**
     * Returns the NodeTypeObj structure associated with this node.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return NodeTypeObj encapsulating the node type from which this node was created
     */
    NodeTypeObj(CARB_ABI* getNodeTypeObj)(const NodeObj& node);
    /**
     * Resolves attribute types given a set of attributes which are fully type coupled.
     * For example if node 'Increment' has one input attribute 'a' and one output attribute 'b'
     * and the types of 'a' and 'b' should always match. If the input is resolved then this function will
     * resolve the output to the same type.
     * It will also take into consideration available conversions on the input side.
     * The type of the first (resolved) provided attribute will be used to resolve others or select appropriate conversions
     *
     * Note that input attribute types are never inferred from output attribute types.
     *
     * This function should only be called from the INodeType function `onConnectionTypeResolve`
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[out] attrsBuf Buffer that holds the attributes to be resolved as a coupled group
     * @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
     * @return true if successful, false otherwise, usually due to mismatched or missing resolved types
     */
    bool(CARB_ABI* resolveCoupledAttributes)(const NodeObj& node, AttributeObj* attrsBuf, size_t bufferSize);
    /**
     * Resolves attribute types given a set of attributes, that can have differing tuple counts and/or array depth,
     * and differing but convertible base data type.
     * The three input buffers are tied together, holding the attribute, the tuple
     * count, and the array depth of the types to be coupled.
     * This function will solve base type conversion by targeting the first provided type in the list,
     * for all other ones that require it.
     *
     * For example if node 'makeTuple2' has two input attributes 'a' and 'b' and one output 'c' and we want to resolve
     * any float connection to the types 'a':float, 'b':float, 'c':float[2] (convertible base types and different tuple counts)
     * then the input buffers would contain:
     *   attrsBuf = [a, b, c]
     *   tuplesBuf = [1, 1, 2]
     *   arrayDepthsBuf = [0, 0, 0]
     *   rolesBuf = [AttributeRole::eNone, AttributeRole::eNone, AttributeRole::eNone]
     *
     * It is worth noting that 'b' could be of any type convertible to float. But since the first provided
     * attribute is 'a', the type of 'a' will be used to propagate the type resolution.
     *
     * Note that input attribute types are never inferred from output attribute types.
     *
     * This function should only be called from the INodeType function `onConnectionTypeResolve`
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] attrsBuf Buffer that holds the attributes to be resolved as a coupled group
     * @param[in] tuplesBuf Buffer that holds the tuple count desired for each corresponding attribute. Any value
     *            of kUninitializedTypeCount indicates the found tuple count is to be used when resolving.
     * @param[in] arrayDepthsBuf Buffer that holds the array depth desired for each corresponding attribute. Any value
     *            of kUninitializedTypeCount indicates the found array depth is to be used when resolving.
     * @param[in] rolesBuf Buffer that holds the role desired for each corresponding attribute. Any value of
     *            AttributeRole::eUnknown indicates the found role is to be used when resolving.
     * @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
     * @return true if successful, false otherwise, usually due to mismatched or missing resolved types
     */
    bool(CARB_ABI* resolvePartiallyCoupledAttributes)(const NodeObj& node,
                                                      const AttributeObj* attrsBuf,
                                                      const uint8_t* tuplesBuf,
                                                      const uint8_t* arrayDepthsBuf,
                                                      const AttributeRole* rolesBuf,
                                                      size_t bufferSize);
    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Use USD notice handling to monitor changes")
    bool(CARB_ABI* registerPathChangedCallback)(const NodeObj&, PathChangedCallback);
    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Use USD notice handling to monitor changes")
    void(CARB_ABI* deregisterPathChangedCallback)(const NodeObj& node, PathChangedCallback pathChangedCallback);
    /**
     * Returns the graph wrapped by this node, if any (as opposed to the graph this node belongs to)
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return GraphObj structure containing the graph wrapped by this node
     */
    GraphObj(CARB_ABI* getWrappedGraph)(const NodeObj& node);
    /**
     * Returns the interface for the event stream generated by changes to this node.
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return The IEventStreamPtr that pumps events when node changes happen
     */
    carb::events::IEventStreamPtr(CARB_ABI* getEventStream)(const NodeObj& node);
    /**
     * Returns whether the NodeObj's handle is still valid
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return true if valid, false otherwise
     */
    bool (CARB_ABI* isValid)(const NodeObj& node);
    /**
     * Requests that the given node be computed at the next graph evaluation. This is for use with nodes that are
     * marked as being RequestDrivenCompute.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return true if the request was successful, false if there was an error
     */
    bool(CARB_ABI* requestCompute)(const NodeObj& node);
    /**
     * Returns a NodeObj for the given NodeHandle if the referenced node is valid.
     *
     * @param[in] nodeHandle The NodeHandle for the compute node
     * @return The NodeObj representing the node. In case of failure, the node handle
     *         will be kInvalidNodeHandle
     */
    NodeObj(CARB_ABI* getNodeFromHandle)(const NodeHandle nodeHandle);
    /**
     * Returns the number of times compute() has been called on this node since the
     * counter last rolled over to 0.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @return The count.
     */
    size_t (CARB_ABI* getComputeCount)(const NodeObj& node);
    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Use increaseComputeCount instead")
    size_t (CARB_ABI* incrementComputeCount)(const NodeObj& node);
    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Use logComputeMessageOnInstance instead")
    bool (CARB_ABI* logComputeMessage)(const NodeObj& node, ogn::Severity severity, const char* message);
    /**
     * Returns the number of compute messages of the given severity logged for the node.
     *
     * Compute messages are cleared at the start of each evaluation, so the count will be
     * only for the most recent evaluation.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @param[in] severity Severity level of the messages.
     * @return The number of compute messages of the specified severity.
     */
    size_t (CARB_ABI* getComputeMessageCount)(const NodeObj& node, ogn::Severity severity);
    /**
     * Returns a specified compute message of the given severity logged for the node.
     *
     * Use getComputeMessageCount() to determine the number of messages currently available.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @param[in] severity Severity level of the message to return.
     * @param[in] index Index of the message to return, starting at 0.
     * @return The requested message or nullptr if 'index' was out of range.
     */
    const char* (CARB_ABI* getComputeMessage)(const NodeObj& node, ogn::Severity severity, size_t index);
    /**
     * Clears all compute messages logged for the node prior to its most recent evaluation.
     * Messages from its most recent evaluation are left untouched.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @return The number of messages that were deleted.
     */
    size_t (CARB_ABI* clearOldComputeMessages)(const NodeObj& node);
    /**
     * Retrieve the OGN database for the current active instance of this node. The DB is maintained up to date
     * internally by the framework. If it does not exist, the provided ognCreate callback will be invoked to
     * instantiate it.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @param[in] ognCreate A callback that allocates and returns the DB associated to the current active instance of this node. Ownership is transferred to the node
     * @return The OGN database for the current active instance of this node
     */
    ogn::OmniGraphDatabase*(CARB_ABI* getOgnDatabase)(const NodeObj& node, CreateDbFunc ognCreate);
    /**
     * Returns whether this node is a compound node. A compound node is a node whose node type is defined by a subgraph instead
     * of a built-in type.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @returns True if the provided node is a compound node, false otherwise
     */
    bool(CARB_ABI* isCompoundNode)(const NodeObj& node);
    /**
     * Retrieve a persistent ID for the current active graph associated to this node, optionally offset
     *
     * @param[in] node Handle representing the node object.
     * @param[in] instanceOffset: In vectorized context, the instance index relative to the currently targeted graph
     * @return A unique and process-persistent ID that represents the current active instance of this node
     */
    NameToken (CARB_ABI* getGraphInstanceID)(NodeHandle node, InstanceIndex instanceOffset);
    /**
     * Returns a Stamp that is incremented any time an input or state attribute is changed outside of graph evaluation.
     * For example, interactively or through a script.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @returns The stamp value
     */
    exec::unstable::Stamp(CARB_ABI* getAttributeChangeStamp)(const NodeObj& node);
    /**
     * Returns a handle to the associated sub-graph, if the given node is a compound node.
     *
     * @param[in] node Handle representing the node object.
     * @return The GraphObj representing the graph. In case of failure, the graph handle
     *         will be kInvalidGraphHandle.
     */
    GraphObj (CARB_ABI* getCompoundGraphInstance)(const NodeObj& node);
    /**
     * Query all the node and context handles that the provided node emulates when used in an auto instancing scenario.
     *
     * @param[in] node Handle representing the "master" node object, the one that handles the execution.
     * @param[out] graphContexts A pointer reference that will be set to an array of all the emulated auto-instanced graph contexts
     * @param[out] nodeObjects A pointer reference that will be set to an array of all the emulated auto-instanced nodes
     * @return The number of elements in the returned arrays, 1 if there is no auto instancing associated with the provided node (itself)
     */
    size_t(CARB_ABI* getAutoInstances)(const NodeObj& node, GraphContextObj const*& graphContexts, NodeObj const*& nodeObjects);
    /** @private Retired - do not use */
    bool (CARB_ABI* retired_5)(const NodeObj& nodeObj);
    /**
     * Logs a compute message of a given severity for the node.
     *
     * This method is intended to be used from within the compute() method of a
     * node to alert the user to any problems or issues with the node's most recent
     * evaluation. They are accumulated until the start of the next compute,
     * at which point they are cleared.
     *
     * If duplicate messages are logged, with the same severity level, only one is
     * stored.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @param[in] inst In vectorized context, the instance index relative to the currently targeted graph
     * @param[in] severity Severity level of the message.
     * @param[in] message The message.
     * @return Returns true if the message has already been logged, false otherwise.
     */
    bool(CARB_ABI* logComputeMessageOnInstance)(const NodeObj& node, InstanceIndex inst, ogn::Severity severity, const char* message);
    /**
     * Increase the node's compute counter by the provided amount.
     *
     * This method is provided primarily for debugging and experimental uses and
     * should not normally be used by end-users.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] count the number to be added to the compute count.
     * @return The new count.
     */
    size_t(CARB_ABI* increaseComputeCount)(const NodeObj& node, size_t count);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(INode, increaseComputeCount, 52)
/**
 * Defines the node event types.
 *
 * Events of these types are pumped on the stream returned by INode::getEventStream.
 */
enum class INodeEvent
{
    eCreateAttribute, //!< Dynamic attribute added to a node
    eRemoveAttribute, //!< Dynamic attribute removed from a node
    eAttributeTypeResolve //!< Extended-type attribute resolution has changed
};
/**
 * Defines the graph event types.
 */
enum class IGraphEvent
{
    eCreateVariable, ///< Variable has been added to the graph
    eRemoveVariable, ///< Variable has been removed from the graph
    eClosing, ///< Stage is closing
    eComputeRequested, ///< INode::requestCompute was called on a contained node
    eNodeAttributeChange,///< An input or state attribute changed outside of graph evaluation
    eVariableTypeChange ///< A variable in the graph had its type changed
};
// ======================================================================
/** Interface to an OmniGraph, several of which may be present in a scene */
struct IGraph
{
//! @private to avoid doxygen problems
CARB_PLUGIN_INTERFACE("omni::graph::core::IGraph", 3, 13);
/**
* Returns the number of nodes in the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The number of nodes in the graph
*/
size_t(CARB_ABI* getNodeCount)(const GraphObj& graphObj);
/**
* Get the nodes in the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[out] nodesBuf Buffer to hold the return NodeObjs
* @param[in] bufferSize the number of NodeObj structures the buffer is able to hold
* @return true on success, false on failure
*/
bool(CARB_ABI* getNodes)(const GraphObj& graphObj, NodeObj* nodesBuf, size_t bufferSize);
/**
* Returns the number of subgraphs in the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The number of subgraphs in the graph
*/
size_t(CARB_ABI* getSubgraphCount)(const GraphObj& graphObj);
/**
* Get the subgraphs in the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[out] graphsBuf Buffer to hold the return GraphObjs
* @param[in] bufferSize the number of GraphObjs structures the buffer is able to hold
* @return true on success, false on failure
*/
bool(CARB_ABI* getSubgraphs)(const GraphObj& graphObj, GraphObj* graphsBuf, size_t bufferSize);
/**
* Get a particular subgraph in the graph given its path
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] subgraphPath the path to the subgraph in question
* @return The GraphObj representing the graph. In case of failure, the graph handle
* will be kInvalidGraphHandle
*/
GraphObj(CARB_ABI* getSubgraph)(const GraphObj& graphObj, const char* subgraphPath);
/**
* Returns the path to the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The path to the graph (may be empty)
*/
const char*(CARB_ABI* getPathToGraph)(const GraphObj& graphObj);
/**
* Returns whether the current graph is disabled
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return true if the current graph is disabled false otherwise
*/
bool(CARB_ABI* isDisabled)(const GraphObj& graphObj);
/**
* Sets the disabled state of the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] disable Whether or not to disable the current graph
*/
void(CARB_ABI* setDisabled)(const GraphObj& graphObj, bool disable);
/**
* Get the default graph context associated with the graph. Each graph has a default
* context - it's usually one that gives you evaluation of the graph on the current
* time, for example.
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The GraphContextObj representing the default graph context associated with
* the node.
*/
GraphContextObj(CARB_ABI* getDefaultGraphContext)(const GraphObj& graphObj);
/**
* Get a particular node in the graph given its path
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] nodePath the path to the node in question
* @return The NodeObj representing the node. In case of failure, the node handle
* will be kInvalidNodeHandle
*/
NodeObj(CARB_ABI* getNode)(const GraphObj& graphObj, const char* nodePath);
/**
* Create a node in the graph at a given path with a given type
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] nodePath the path to where the node will be added
* @param[in] nodeType the type name of the node to add
* @param[in] createUsd Whether to create USD backing for the node being created
* @return The NodeObj representing the node. In case of failure, the node handle
* will be kInvalidNodeHandle
*/
NodeObj(CARB_ABI* createNode)(GraphObj& graphObj, const char* nodePath, const char* nodeType, bool createUsd);
/**
* Destroy the node in the graph at a given path
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] nodePath the path of the node to destroy
* @param[in] destroyUsd Whether to destroy USD backing for the node being destroyed
* @return True, if the node was successfully destroyed. False otherwise.
*/
bool(CARB_ABI* destroyNode)(GraphObj& graphObj, const char* nodePath, bool destroyUsd);
/**
* Rename the node in the graph at a given path
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] nodePath the path of the node to rename
* @param[in] newPath the new path of the node
* @return True, if the node was successfully renamed. False otherwise.
*/
bool(CARB_ABI* renameNode)(GraphObj& graphObj, const char* oldPath, const char* newPath);
/**
* Create a subgraph in the graph at a given path with a given type
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] subgraphPath the path to where the subgraph will be added
* @param[in] evaluator the evaluator type to use for the subgraph being created
* @param[in] createUsd whether to create USD backing for the subgraph being created
* @return The GraphObj representing the subgraph. In case of failure, the graph handle
* will be kInvalidGraphHandle
*/
GraphObj(CARB_ABI* createSubgraph)(GraphObj& graphObj, const char* subgraphPath, const char* evaluator, bool createUsd);
/**
* reload the graph settings based on ComputeGraphSettings
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
*/
void(CARB_ABI* reloadGraphSettings)(GraphObj& graphObj);
/**
* Rename the subgraph in the graph at a given path
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] subgraphPath the path of the subgraph to rename
* @param[in] newPath the new path of the subgraph
* @return True, if the subgraph was successfully renamed. False otherwise.
*/
bool(CARB_ABI* renameSubgraph)(GraphObj& graphObj, const char* oldPath, const char* newPath);
/**
* Notifies the graph that some attribute has changed on a node. This will trigger
* updates from things like lazy evaluation graphs, for example.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] Reference to the AttributeObj struct representing the attribute object
*/
void(CARB_ABI* onAttributeChanged)(const GraphObj& graphObj, const AttributeObj& attrObj);
/** @private Deprecated - do not use */
CARB_DEPRECATED("Will be removed in next major version, please use IGraph::inspect instead")
void(CARB_ABI* printDiagnostic)(const GraphObj&);
/**
* Register a callback to be invoked when a legacy file with an older file format version
* is detected. This callback is invoked before stage attach happens.
*
* @param[in] fileFormatUpgrade - structure containing the callback and a piece of user data to
* be passed back to the callback when invoked.
*/
void (CARB_ABI* registerPreLoadFileFormatUpgradeCallback)(FileFormatUpgrade fileFormatUpgrade);
/**
* Register a callback to be invoked when a legacy file with an older file format version
* is detected. This callback is invoked after stage attach happens.
*
* @param[in] fileFormatUpgrade - structure containing the callback and a piece of user data to
* be passed back to the callback when invoked.
*/
void (CARB_ABI* registerPostLoadFileFormatUpgradeCallback)(FileFormatUpgrade fileFormatUpgrade);
/**
* Deregisters the pre-load callback to be invoked when a legacy file with an older
* file format version is detected.
*
* @param[in] fileFormatUpgrade - structure containing the callback and a piece of user data to
* be passed back to the callback when invoked.
*
*/
void (CARB_ABI* deregisterPreLoadFileFormatUpgradeCallback)(FileFormatUpgrade fileFormatUpgrade);
/**
* Deregisters the post-load callback to be invoked when a legacy file with an older
* file format version is detected.
*
* @param[in] fileFormatUpgrade - structure containing the callback and a piece of user data to
* be passed back to the callback when invoked.
*/
void (CARB_ABI* deregisterPostLoadFileFormatUpgradeCallback)(FileFormatUpgrade fileFormatUpgrade);
/**
* Returns whether USD notice handling is enabled for the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return true if the current graph has USD notice handling enabled
*/
bool(CARB_ABI* usdNoticeHandlingEnabled)(const GraphObj& graphObj);
/**
* Sets whether the USD notice handling is enabled for this graph. This is an
* advanced operation - do not use this method unless you know what you're doing.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] enable Whether or not to enable USD notice handling for this graph
*/
void(CARB_ABI* setUSDNoticeHandlingEnabled)(const GraphObj& graphObj, bool enable);
/**
* Runs the inspector on the data in the given graph.
*
* @param[in] graphObj The graph on which the inspector runs
* @param[in] inspector The inspector class
* @return true if the inspection ran successfully, false if the inspection type is not supported
*/
bool(CARB_ABI* inspect)(const GraphObj& graphObj, inspect::IInspector* inspector);
/**
* Create a new graph, wrapped as a node, at the given location.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] nodeName the name of the node that wraps the graph
* @param[in] graphPath the path to where the graph that the node will wrap will be added
* @param[in] evaluatorName the evaluator to use for the new graph
* @param[in] isGlobalGraph Whether this graph is a top level global graph
* @param[in] backByUSD Whether to back this graph by USD
* @param[in] backingType What kind of FC backs this graph
* @param[in] graphPipelineStage What pipeline stage this graph occupies
* @return The NodeObj representing the node that wraps the graph. The newly created graph can be
* retrieved from the node. In case of failure, the NodeObj will contain kInvalidNodeHandle
*/
NodeObj(CARB_ABI* createGraphAsNode)(GraphObj& graphObj,
const char* nodeName,
const char* graphPath,
const char* evaluatorName,
bool isGlobalGraph,
bool backByUSD,
GraphBackingType backingType,
GraphPipelineStage graphPipelineStage);
/**
* Reloads the graph from the stage by deleting the current graph and creating a new one to attach
* to the stage. Note: this is a complete reset - any stateful nodes will lose their state.
*
* @param[in] graphObj The graph to reload
*/
void(CARB_ABI* reloadFromStage)(const GraphObj& graphObj);
/**
* Returns the Fabric backing type for this graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return Fabric backing type. See GraphBackingType definition for details.
*/
GraphBackingType(CARB_ABI* getGraphBackingType)(const GraphObj& graphObj);
/**
* Returns the graph pipeline stage (eg. simulation, pre-render, post-render) for this graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return Graph pipeline stage. See GraphPipelineStage definition for details.
*/
GraphPipelineStage(CARB_ABI* getPipelineStage)(const GraphObj& graphObj);
/**
* Returns whether the GraphObj's handle is still valid
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return true if valid, false otherwise
*/
bool(CARB_ABI* isValid)(const GraphObj& graphObj);
/**
* Returns the FabricId for this Graph. This id can be used with the Fabric API, but should only
* be required for advanced use cases.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[out] fabricId The output user id
* @return true on success, false on failure
*/
bool(CARB_ABI* getFabricId)(const GraphObj& graphObj, omni::fabric::FabricId& fabricId);
/**
* Warning: this is an advanced function - do not call unless you know exactly what is involved here.
*
* This allows a graph to be "ticked" independently of the normal graph evaluation process, where
* graphs are ordered into different pipeline stages (simulation, pre-render, post-render), and all
* graphs of each stage are evaluated according to the order described in the orchestration graph in
* each stage.
*
* Instead, this function allows graphs in the custom pipeline stage to be evaluated on its own.
* If this function is being called from a different thread, it is the caller's responsibility to ensure
* that the Fabric backing the graph is independent (stage without history), otherwise data races
 * will ensue. If this function is being called from the simulation / main thread, then the Fabric
* backing the graph may be a shared one.
*
* It is illegal to call this function for any graph other than those setup with the custom pipeline
* stage.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
*/
void (CARB_ABI* evaluate)(const GraphObj& graphObj);
/**
* Returns the parent of this graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The parent graph (may be invalid)
*/
GraphObj(CARB_ABI* getParentGraph)(const GraphObj& graphObj);
/**
* Returns whether the path points to a top level graph prim.
*
* @param[in] path to the prim in question
* @return True if the path points to a prim that is a top level graph
*/
bool(CARB_ABI* isGlobalGraphPrim)(const char* path);
/**
* Registers a callback to be invoked at the end of graph evaluation for all of the nodes
* whose error status changed during that evaluation.
*
* This is provided primarily for UI purposes. E.g. highlighting nodes with compute errors
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] errorStatusChangeCallback - structure containing the callback and a piece of user data to
* be passed back to the callback when invoked.
*/
void (CARB_ABI* registerErrorStatusChangeCallback)(const GraphObj& graphObj, ErrorStatusChangeCallback errorStatusChangeCallback);
/**
* Deregisters a callback to be invoked at the end of graph evaluation for all of the nodes
* whose error status changed during that evaluation.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] errorStatusChangeCallback - structure containing the callback and a piece of user data to
* be passed back to the callback when invoked.
*
*/
void (CARB_ABI* deregisterErrorStatusChangeCallback)(const GraphObj& graphObj, ErrorStatusChangeCallback errorStatusChangeCallback);
/** @private deprecated - do not use */
CARB_DEPRECATED("Will be retired in next major version: this call is not necessary anymore and can be safely removed")
void(CARB_ABI* nodeErrorStatusChanged)(const GraphObj& graphObj, const NodeObj& nodeObj);
/**
* Returns the number of variables in the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The number of variables in the graph.
*/
size_t(CARB_ABI* getVariableCount)(const GraphObj& graphObj);
/**
* Get the variables defined in the graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[out] variableBuf Buffer to hold the returned IVariable objects
* @param[in] bufferSize The number of IVariable objects the buffer is able to hold
* @return true on success, false on failure
*/
bool(CARB_ABI* getVariables)(const GraphObj& graphObj, IVariablePtr* variableBuf, size_t bufferSize);
/**
* Create a new variable on the graph with the given name. The name must be unique
* among variables on the graph, even if the type is different.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] name The name to give the variable.
* @param[in] variableType the data type used to create the variable.
* @return The newly created variable, or null if the variable could not be created.
*/
IVariablePtr(CARB_ABI* createVariable)(const GraphObj& graphObj, const char* name, Type variableType);
/**
* Removes the given variable from the graph.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] variable The variable to remove from the graph.
* @return True if the variable was successfully removed, false otherwise.
*/
bool(CARB_ABI* removeVariable)(const GraphObj& graphObj, const IVariablePtr& variable);
/**
* Retrieves a variable with the given name.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] name The name of the variable to search for.
* @return The variable with the given name on the graph, or null if the variable does
* not exist.
*/
IVariablePtr(CARB_ABI* findVariable)(const GraphObj& graphObj, const char* name);
/**
* Change the pipeline stage (eg. simulation, pre-render, post-render) that this graph is in
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] newPipelineStage The new pipeline stage that this graph will be moved into
*/
void (CARB_ABI* changePipelineStage)(const GraphObj& graphObj, GraphPipelineStage newPipelineStage);
/**
* Returns the interface for the event stream generated by changes to this graph.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The IEventStreamPtr that pumps events when graph changes happen
*/
carb::events::IEventStreamPtr(CARB_ABI* getEventStream)(const GraphObj& graphObj);
/**
* Returns the evaluation mode of the graph. The evaluation mode determines how the graph
* will be evaluated standalone or when referenced from an OmniGraphAPI component.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The evaluation mode of the graph
*/
GraphEvaluationMode(CARB_ABI* getEvaluationMode)(const GraphObj& graphObj);
/**
* Sets the evaluation mode of the graph. The evaluation mode determines if the graph
* will be evaluated standalone or when referenced from an OmniGraphAPI component.
*
* @param[in] graphObj Reference to the graph object
* @param[in] evaluationMode the evaluation mode of the graph to set
*/
void(CARB_ABI* setEvaluationMode)(const GraphObj& graphObj, GraphEvaluationMode evaluationMode);
/**
* Create a new graph, wrapped as a node, at the given location.
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @param[in] options Options relating to creating a graph as a node.
* @return The NodeObj representing the node that wraps the graph. The newly created graph can be
* retrieved from the node. In case of failure, the NodeObj will contain kInvalidNodeHandle
*/
NodeObj(CARB_ABI* createGraphAsNodeV2)(GraphObj& graphObj, const CreateGraphAsNodeOptions& options);
/**
* Returns the name of the evaluator for the specified graph
*
* @param[in] graphObj Reference to the GraphObj struct representing the graph object
* @return The name of the evaluator, or the empty string if it isn't set.
*/
const char*(CARB_ABI* getEvaluatorName)(const GraphObj& graphObj);
/**
* Returns whether this graph is a compound graph instance. A compound graph is a subgraph that
* is parented to a compound node
*
* @return True if this graph is a compound graph, false otherwise.
*/
bool(CARB_ABI* isCompoundGraph)(const GraphObj& graphObj);
/**
 * Returns the number of instances currently allocated for this graph
*
* @return The number of instances registered in the graph, 0 if the graph is standalone
*/
size_t(CARB_ABI* getInstanceCount)(const GraphObj& graphObj);
/**
* Returns whether this graph is an auto instance. An auto instance is a graph that got merged as an instance
* with all other similar graphs in the stage.
*
* @return True if this graph is an auto instance, false otherwise.
*/
bool(CARB_ABI* isAutoInstanced)(const GraphObj& graphObj);
/**
* Set whether or not this graph can be candidate for auto-instance merging (true by default)
*
* @return The old value of the allowed flag
*/
bool(CARB_ABI* setAutoInstancingAllowed)(const GraphObj& graphObj, bool allowed);
/**
* Returns the compound node for which this graph is the compound subgraph of.
*
* @return If this graph is a compound graph, the owning compound node. Otherwise, an invalid node is returned.
*/
NodeObj(CARB_ABI* getOwningCompoundNode)(const GraphObj& graphObj);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle.
// NOTE(review): the macro pins the last member (getOwningCompoundNode); presumably 52 is the expected
// function-pointer count so any mid-struct insertion fails to compile — confirm against the macro definition.
STRUCT_INTEGRITY_CHECK(IGraph, getOwningCompoundNode, 52)
//! Bitmask-style flags describing how attribute/array data may be accessed (see constants below).
using DataAccessFlags = uint32_t; //!< Data type for specifying read/write access abilities
static constexpr DataAccessFlags kReadAndWrite = 0; //!< Data is accessible for both read and write
static constexpr DataAccessFlags kReadOnly = 1; //!< Data is only accessible for reading
static constexpr DataAccessFlags kWriteOnly = 2; //!< Data is only accessible for writing
// ======================================================================
/** Use this interface to pull data for compute node, and also push data to compute graph/cache */
struct IGraphContext
{
//! @private to avoid doxygen problems
CARB_PLUGIN_INTERFACE("omni::graph::core::IGraphContext", 3, 7);
/**
* Returns the stage id the context is currently attached to
* @param[in] context structure containing both the interface and underlying object
* @return the USD stage id
*/
long int(CARB_ABI* getStageId)(const GraphContextObj& contextObj);
//---------------------------------------------------------------------------------------------
// wrappers for fabric
/** @private Retired - do not use */
void (CARB_ABI* retired_1)();
/** @private Retired - do not use */
void (CARB_ABI* retired_2)();
/** @private Deprecated - do not use */
size_t*(CARB_ABI* deprecated_12)(const GraphContextObj&, const AttributeObj&, DataAccessFlags);
/** @private Deprecated - do not use */
size_t*(CARB_ABI* deprecated_13)(const GraphContextObj&, const AttributeObj&, DataAccessFlags);
/** @private Retired - do not use */
void (CARB_ABI* retired_3)();
/** @private Retired - do not use */
void (CARB_ABI* retired_4)();
/** @private Retired - do not use */
void (CARB_ABI* retired_5)();
/** @private Retired - do not use */
void (CARB_ABI* retired_6)();
/** @private Retired - do not use */
void (CARB_ABI* retired_7)();
/** @private Retired - do not use */
void (CARB_ABI* retired_8)();
/**
* Returns the graph associated with this context
* @param[in] context structure containing both the interface and underlying object
* @return GraphObj structure containing the graph
*/
GraphObj(CARB_ABI* getGraph)(const GraphContextObj& context);
/**
* Returns the time between last evaluation of the graph and "now"
* @param[in] context structure containing both the interface and underlying object
* @return the elapsed time
*/
float(CARB_ABI* getElapsedTime)(const GraphContextObj& contextObj);
/**
* Returns the global playback time
* @param[in] context structure containing both the interface and underlying object
* @return the global playback time in seconds
*/
float(CARB_ABI* getTime)(const GraphContextObj& contextObj);
/**
* Returns the time between last evaluation of the graph and "now", in NS.
* Note this will only return valid values if the update loop is using the
* void updateSimStep(int64_t timeNS, carb::tasking::Counter* counter, bool) interface
* As of this writing, this is limited to the DS project
*
* @param[in] context structure containing both the interface and underlying object
* @return the elapsed time in nano seconds
*/
int64_t(CARB_ABI* getElapsedTimeNS)(const GraphContextObj& contextObj);
/**
* Returns the global time in NS.
* Note this will only return valid values if the update loop is using the
* void updateSimStep(int64_t timeNS, carb::tasking::Counter* counter, bool) interface
* As of this writing, this is limited to the DS project
*
* @param[in] context structure containing both the interface and underlying object
* @return the global time in nano seconds
*/
int64_t(CARB_ABI* getTimeNS)(const GraphContextObj& contextObj);
/**
* Given an attribute, retrieves the default attribute value in system memory
*
* @param[in] context structure containing both the interface and underlying object
* @param[in] attrObj The attribute object for which to retrieve the default value
* @return the const void pointer to the data
*/
const void*(CARB_ABI* getDefault)(const GraphContextObj& context, const AttributeObj& attrObj);
/** @private Deprecated - do not use */
ConstBundleHandle(CARB_ABI* deprecated_4)(const GraphContextObj&, NodeContextHandle, NameToken);
/** @private Deprecated - do not use */
size_t (CARB_ABI* deprecated_5)(const GraphContextObj&, NodeContextHandle, NameToken);
/** @private Deprecated - do not use */
void (CARB_ABI* deprecated_6)(const GraphContextObj&,NodeContextHandle,NameToken,ConstBundleHandle*);
/** @private Deprecated - do not use */
BundleHandle (CARB_ABI* deprecated_3)(const GraphContextObj& , NodeContextHandle, NameToken);
/** @private Deprecated - do not use */
void(CARB_ABI* deprecated_7)(
ConstAttributeDataHandle*, const GraphContextObj&, NodeContextHandle, const NameToken*, size_t);
/** @private Deprecated - do not use */
void(CARB_ABI* deprecated_8)(AttributeDataHandle*, const GraphContextObj&, NodeContextHandle, const NameToken*, size_t);
/**
* Retrieve the number of attributes that a given node has
*
* @param[in] contextObj: Structure containing both the interface and underlying object
* @param[in] node The node to query the attribute count from
* @return the number of attributes the queried node has
*/
size_t (CARB_ABI* getAttributesCount)(const GraphContextObj& contextObj, NodeContextHandle node);
/** @private Deprecated - do not use */
void(CARB_ABI* deprecated_9)(ConstAttributeDataHandle*, const GraphContextObj&, NodeContextHandle, size_t);
/** @private Deprecated - do not use */
void(CARB_ABI* deprecated_10)(AttributeDataHandle*, const GraphContextObj&, NodeContextHandle, size_t);
/** @private Deprecated - do not use */
BundleHandle(CARB_ABI* deprecated_11)(const GraphContextObj&, NodeContextHandle, NameToken, ConstBundleHandle);
/** @private Deprecated - do not use */
[[deprecated("Use copyBundleContentsInto")]] void (CARB_ABI* copyPrimContentsInto)(const GraphContextObj& contextObj,
BundleHandle destBundleHandle,
ConstBundleHandle sourceBundleHandle);
/** @private Retired - do not use */
void(CARB_ABI* retired_10)();
/** @private Deprecated - do not use */
[[deprecated("Use clearBundleContents")]] void(CARB_ABI* clearPrimContents)(const GraphContextObj& contextObj, BundleHandle bundleHandle);
/** @private Retired - do not use */
void(CARB_ABI* retired_9)();
/**
* Returns the global playback time in frames
* @param[in] context structure containing both the interface and underlying object
* @return the global playback time in frames
*/
float(CARB_ABI* getFrame)(const GraphContextObj& contextObj);
/**
* Returns the state of global playback
* @param[in] context structure containing both the interface and underlying object
 * @return true if playback has started, false if playback is stopped
*/
bool(CARB_ABI* getIsPlaying)(const GraphContextObj& contextObj);
/**
* Runs the inspector on the data in the given graph context.
*
* @param[in] contextObj The graph context on which the inspector runs
* @param[in] inspector The inspector class
* @return true if the inspection ran successfully, false if the inspection type is not supported
*/
bool(CARB_ABI* inspect)(const GraphContextObj& contextObj, inspect::IInspector* inspector);
/**
* Returns the time since the App started
* @param[in] context structure containing both the interface and underlying object
* @return the global time since the app started in seconds
*/
double(CARB_ABI* getTimeSinceStart)(const GraphContextObj& contextObj);
/**
* Returns whether the graph context object is still valid or not
*
* @param[in] contextObj The context object for which to query
* @return Whether the context is still valid
*/
bool(CARB_ABI* isValid)(const GraphContextObj& contextObj);
/** @private Deprecated - do not use */
AttributeDataHandle (CARB_ABI* deprecated_0)(const GraphContextObj&, const IVariablePtr&);
/** @private Deprecated - do not use */
ConstAttributeDataHandle(CARB_ABI* deprecated_1)(const GraphContextObj&, const IVariablePtr&);
/**
* Returns the accumulated total of elapsed times between rendered frames
* @param[in] contextObj structure containing both the interface and underlying object
* @return the accumulated total of elapsed times between rendered frames
*/
double(CARB_ABI* getAbsoluteSimTime)(const GraphContextObj& contextObj);
/** @private Deprecated - do not use */
NameToken(CARB_ABI* deprecated_2)(const GraphContextObj&);
/** Deprecated - do not use - removal scheduled for 106 **/
[[deprecated("Use registerForUSDWriteBacks")]]
void(CARB_ABI* registerForUSDWriteBack)(const GraphContextObj& contextObj, BundleHandle bundle, NameToken attrib);
/**
* Given a variable and an instance path, returns a handle to access its data.
*
* @param[in] contextObj The context object used to find the variable data
* @param[in] variable The variable to retrieve the data from
 * @param[in] instancePrimPath Path to the prim holding an instance of this graph
*
* @returns An attribute data handle that can be used to access the variable data.
* If the given prim does not contain an instance of the graph, the data handle
* returned will be invalid.
*/
AttributeDataHandle(CARB_ABI* getVariableInstanceDataHandle)(const GraphContextObj& contextObj,
const IVariablePtr& variable,
const char* instancePrimPath);
/**
* Given a variable and an instance path, returns a constant handle to access its data as readonly.
*
* @param[in] contextObj The context object used to find the variable data
* @param[in] variable The variable to retrieve the data from
 * @param[in] instancePrimPath Path to the prim holding an instance of this graph
*
 * @returns A constant attribute data handle that can be used to access the variable data.
* If the given prim does not contain an instance of the graph, the data handle
* returned will be invalid.
*/
ConstAttributeDataHandle(CARB_ABI* getVariableInstanceConstDataHandle)(const GraphContextObj& contextObj,
const IVariablePtr& variable,
const char* instancePrimPath);
/**
* Get the Prim path of the graph target.
*
* The graph target is defined as the parent Prim of the compute graph, except during
* instancing - where OmniGraph executes a graph once for each Prim. In the case
* of instancing, the graph target will change at each execution to be the path of the instance.
* If this is called outside of graph execution, the path of the graph Prim is returned, or an empty
* token if the graph does not have a Prim associated with it.
*
* @param[in] contextObj The context object used to find the data.
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*
* @returns a token representing the path of the graph target primitive.
*/
NameToken const&(CARB_ABI* getGraphTarget)(const GraphContextObj& contextObj, InstanceIndex instanceIndex);
/**
* Given a variable, returns a handle to access its data.
*
* @param[in] contextObj The context object used to find the variable data
* @param[in] variable The variable to retrieve the data from
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*
* @returns An attribute data handle that can be used to access the variable data.
*/
AttributeDataHandle(CARB_ABI* getVariableDataHandle)(const GraphContextObj& contextObj,
const IVariablePtr& variable,
InstanceIndex instanceIndex);
/**
* Given a variable, returns a constant handle to access its data as readonly.
*
* @param[in] contextObj The context object used to find the variable data
* @param[in] variable The variable to retrieve the data from
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*
* @returns A constant attribute data handle that can be used to access the variable data.
*/
ConstAttributeDataHandle(CARB_ABI* getVariableConstDataHandle)(const GraphContextObj& contextObj,
const IVariablePtr& variable,
InstanceIndex instanceIndex);
/** @private Deprecated - do not use */
[[deprecated("Use getOutputBundle!")]] BundleHandle(CARB_ABI* getOutputPrim)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken bundleName,
InstanceIndex instanceIndex);
/** @private Deprecated - do not use */
[[deprecated("Use getInputTarget!")]] ConstBundleHandle(CARB_ABI* getInputPrim)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken bundleName,
InstanceIndex instanceIndex);
/** @private Deprecated - do not use */
[[deprecated("Use getInputTargetCount!")]] size_t(CARB_ABI* getInputPrimCount)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken relName,
InstanceIndex instanceIndex);
/** @private Deprecated - do not use */
[[deprecated("Use getInputTargets!")]] void(CARB_ABI* getInputPrims)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken relName,
ConstBundleHandle* bundleHandles,
InstanceIndex instanceIndex);
/**
* Requests some input attributes of the specified compute node in the specified context.
*
* If no input attribute with the given name exists on the node, the returned handle
* will return false from its isValid() function.
*
*
* @param[in/out] attrsOut A pre-allocated array that will be filled with the requested handles
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] attrNames An array of names of attributes on the given node to retrieve a data handle for
* @param[in] count The size of the provided arrays (attrName and attrsOut)
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*/
void(CARB_ABI* getAttributesByNameR)(ConstAttributeDataHandle* attrsOut,
const GraphContextObj& contextObj,
NodeContextHandle node,
const NameToken* attrNames,
size_t count,
InstanceIndex instanceIndex);
/**
* Requests some output attributes of the specified compute node in the specified context.
*
     * If no output attribute with the given name exists on the node, the returned handle
     * will return false from its isValid() function.
*
*
* @param[in/out] attrsOut A pre-allocated array that will be filled with the requested handles
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] attrNames An array of names of attributes on the given node to retrieve a data handle for
* @param[in] count The size of the provided arrays (attrName and attrsOut)
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*/
void(CARB_ABI* getAttributesByNameW)(AttributeDataHandle* attrsOut,
const GraphContextObj& contextObj,
NodeContextHandle node,
const NameToken* attrNames,
size_t count,
InstanceIndex instanceIndex);
/**
* Requests all input attributes of the specified compute node in the specified context.
*
*
* @param[in/out] attrsOut A pre-allocated array that will be filled with the requested handles
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] count The size of the attrsOut array
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*/
void(CARB_ABI* getAttributesR)(ConstAttributeDataHandle* attrsOut,
const GraphContextObj& contextObj,
NodeContextHandle node,
size_t count,
InstanceIndex instanceIndex);
/**
* Requests all output attributes of the specified compute node in the specified context.
*
*
* @param[in/out] attrsOut A pre-allocated array that will be filled with the requested handles
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] count The size of the attrsOut array
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*/
void(CARB_ABI* getAttributesW)(AttributeDataHandle* attrsOut,
const GraphContextObj& contextObj,
NodeContextHandle node,
size_t count,
InstanceIndex instanceIndex);
/** @private Deprecated - do not use */
[[deprecated("use copyBundleContentsIntoOutput")]] BundleHandle(CARB_ABI* copyPrimContentsIntoOutput)(
const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken outBundleName,
ConstBundleHandle sourceBundleHandle,
InstanceIndex instanceIndex);
/**
* Given an attribute of array type, return a pointer to the number of elements in the array
* If flags is kWriteOnly or kReadAndWrite then writing to the dereferenced pointer resizes the
* array the next time it is accessed on CPU or GPU
*
* @param[in] context structure containing both the interface and underlying object
* @param[in] attrObj The attribute object for which to retrieve the array size
* @param[in] flags The data access flags indicating whether the array size is to be R, W, or RW
* @return the pointer to the array size
*/
size_t*(CARB_ABI* getArrayAttributeSize)(const GraphContextObj& context,
const AttributeObj& attrObj,
DataAccessFlags flags,
InstanceIndex instanceIndex);
/**
* Given an attribute of array type, return a GPU pointer to the number of elements in the array
* Flags must be kReadOnly, because currently we don't allow GPU code to resize GPU arrays
* This restriction may be relaxed in the future
* If you want to resize a GPU array you can do it on the CPU using getArrayAttributeSize
*
* @param[in] context structure containing both the interface and underlying object
* @param[in] attrObj The attribute object for which to retrieve the array size
* @param[in] flags The data access flags indicating whether the array size is to be R, W, or RW
* @return the pointer to the array size
*/
size_t*(CARB_ABI* getArrayAttributeSizeGPU)(const GraphContextObj& context,
const AttributeObj& attrObj,
DataAccessFlags flags,
InstanceIndex instanceIndex);
/**
* Requests an output bundle of the specified compute node in the specified context.
*
* If no output bundle with the given name exists on the node, the returned handle
* will return false from its isValid() function.
*
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] outputName The name of the attribute on the given node that represent the output bundle
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*
* @returns A handle to the requested prim
*/
BundleHandle(CARB_ABI* getOutputBundle)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken outputName,
InstanceIndex instanceIndex);
/**
* Requests an input target path of the specified compute node in the specified context.
*
* If no input target path with the given name exists on the node, the returned path
* will return uninitialized path.
*
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] inputName The name of the attribute on the given node that represent the input bundle
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*
* @returns A path to the requested target
*/
omni::fabric::PathC(CARB_ABI* getInputTarget)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken inputName,
InstanceIndex instanceIndex);
/**
* Requests the number of input targets in the relationship with the given name on the
* specified compute node in the specified context.
*
* This returns 0 if no relationship with the given name exists on the node
* or the relationship is empty.
*
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] inputName The name of the relationship attribute on the given node that represent the input targets
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*
* @returns The number of input targets under the provided relationship
*/
size_t(CARB_ABI* getInputTargetCount)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken inputName,
InstanceIndex instanceIndex);
/**
* Fills in the provided targets array with paths to all of the input targets(bundles or primitives)
* in the relationship with the given name on the specified compute node in the specified context.
*
* The caller *must* first call getInputTargetCount to ensure that the targets array will be
* sufficiently large to receive all of the paths.
*
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] inputName The name of the relationship attribute on the given node that represent the input targets
* @param[in,out] targets A pre-sized array that will be filled with the requested paths
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*/
void(CARB_ABI* getInputTargets)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken inputName,
omni::fabric::PathC* targets,
InstanceIndex instanceIndex);
/**
* Creates copies of all attributes from sourceBundleHandle in the output bundle
* with the specified name on the specified node.
*
* This function is equivalent to:
* BundleHandle retBundle = iContext.getOutputBundle(context, node, outBundleName);
* iContext.copyBundleContentsInto(context, retBundle, sourceBundleHandle);
* return retBundle;
* but with a single function pointer call, instead of two.
*
* @param[in] contextObj The context object used to find the data
* @param[in] node The node object to retrieve the data from
* @param[in] outBundleName The name of the attribute on the given node that represent the output bundle to write to
* @param[in] sourceBundleHandle A handle to a bundle to copy content from
* @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph
*
* @returns An handle to the targeted output bundle
*/
BundleHandle(CARB_ABI* copyBundleContentsIntoOutput)(const GraphContextObj& contextObj,
NodeContextHandle node,
NameToken outBundleName,
ConstBundleHandle sourceBundleHandle,
InstanceIndex instanceIndex);
/**
* Creates copies of all attributes from sourceBundleHandle in the bundle corresponding with destBundleHandle.
*
* @param contextObj The context object used to find the data
* @param destBundleHandle A handle to a bundle to copy content to
* @param sourceBundleHandle A handle to a bundle to copy content from
*/
void(CARB_ABI* copyBundleContentsInto)(const GraphContextObj& contextObj,
BundleHandle destBundleHandle,
ConstBundleHandle sourceBundleHandle);
/**
* Removes all attributes from the prim corresponding with bundleHandle,
* but keeps the bundle itself.
*
* @param contextObj The context object used to find the data
* @param bundleHandle A handle to a bundle to clear content from
*/
void(CARB_ABI* clearBundleContents)(const GraphContextObj& contextObj, BundleHandle bundleHandle);
/**
* Register provided attributes for USD write back at the end of the current frame
*
*
* @param[in] contextObj The context object used to find the data.
* @param[in] handles An array of handles of the attributes that should be written back to usd
* @param[in] count The size of the provided array
*/
void(CARB_ABI* registerForUSDWriteBacks)(const GraphContextObj& contextObj, AttributeDataHandle const* handles, size_t count);
/**
* Register provided attributes for USD write back at the end of the current frame to a specific layer
*
*
* @param[in] contextObj The context object used to find the data.
* @param[in] handles An array of handles of the attributes that should be written back to usd
* @param[in] count The size of the provided array
* @param[in] layerIdentifier The unique name for the layer to be written
*/
void(CARB_ABI* registerForUSDWriteBacksToLayer)(const GraphContextObj& contextObj,
AttributeDataHandle const* handles,
size_t count,
NameToken layerIdentifier);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IGraphContext, registerForUSDWriteBacksToLayer, 65)
// ==============================================================================================================
/** The underlying schedule node represents the scheduled task(s) corresponding to the representational node in the
* graph. As OmniGraph evolves, the schedule node will be extended to store the results of the instance(s)/task(s)
* corresponding to the representational node.
*/
struct IScheduleNode
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::IScheduleNode", 1, 0);

    //! @private Deprecated: Task generation is the responsibility of the execution framework
    // NOTE(review): slot retained so the ABI layout stays stable -- see the
    // STRUCT_INTEGRITY_CHECK immediately following this struct.
    NodeObj(CARB_ABI* getNode)(const ScheduleNodeObj& scheduleNodeObj);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IScheduleNode, getNode, 0)
// ==============================================================================================================
//! @private Retired prototype
struct IDataStealingPrototype
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::IDataStealingPrototype", 1, 1);

    // All of the __retired_* slots below are dead entry points kept only so that
    // the function-pointer table keeps its original size and ordering (ABI
    // stability, enforced by the STRUCT_INTEGRITY_CHECK after the struct).

    /** @private Retired - do not use */
    bool(CARB_ABI* __retired_0)(const GraphContextObj&, ConstAttributeDataHandle, ConstAttributeDataHandle);
    /** @private Retired - do not use */
    AttributeDataHandle(CARB_ABI* __retired_1)(const GraphContextObj&, ConstAttributeDataHandle);
    /** @private Retired - do not use */
    bool(CARB_ABI* __retired_2)(const GraphContextObj&, ConstBundleHandle, ConstBundleHandle);
    /** @private Retired - do not use */
    BundleHandle(CARB_ABI* __retired_3)(const GraphContextObj&, ConstBundleHandle);

    //! Whether the prototype is enabled -- always returns false (prototype retired)
    bool(CARB_ABI* enabled)(const GraphContextObj&);

    /** @private Retired - do not use */
    void(CARB_ABI* __retired_4)(const GraphContextObj&, bool);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IDataStealingPrototype, __retired_4, 5)
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/PostUsdInclude.h | // Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// This file should be included in conjunction with PreUsdInclude.h when including any of the individual USD
// definition files. This file restores the warnings that were disabled for the inclusion of USD files.
//
// It includes a special ifdef detection to prevent inclusion of this file without PreUsdInclude.h as that
// would have unpredictable effects on the compiler. The usual "#pragma once" is omitted, so that the mechanism works
// correctly even with multiple uses. It is not legal to include anything other than USD headers between these two.
//
// Here is an example of how you use this mechanism to include the definition of the USD type pxr::GfHalf:
//
// #include <omni/graph/core/PreUsdInclude.h>
// #include <pxr/base/gf/half.h>
// #include <omni/graph/core/PostUsdInclude.h>
//
#ifdef _MSC_VER
// Restore the MSVC warning state pushed by PreUsdInclude.h and remove the
// NOMINMAX definition it introduced.
#    pragma warning(pop)
#    undef NOMINMAX
#elif defined(__GNUC__)
// Restore the GCC/clang diagnostic state pushed by PreUsdInclude.h.
#    pragma GCC diagnostic pop
#    ifdef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
// PreUsdInclude.h presumably #undef'd __DEPRECATED to silence deprecation
// warnings inside the USD headers; it is re-defined (empty) here now that the
// USD headers have been included.
// NOTE(review): an empty definition may not match the compiler's original
// value of __DEPRECATED -- confirm against PreUsdInclude.h.
#        define __DEPRECATED
#        undef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
#    endif
#endif

// Enforce correct pairing: PreUsdInclude.h defines __USD_INCLUDE_PROTECTION__,
// so its absence here means this header was included on its own.
#ifdef __USD_INCLUDE_PROTECTION__
#    undef __USD_INCLUDE_PROTECTION__
#else
#    error "You must include PreUsdInclude.h before including PostUsdInclude.h"
#endif
|
omniverse-code/kit/include/omni/graph/core/IGraphRegistry.h | // Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/events/IEvents.h>
#include <carb/Interface.h>
#include <omni/graph/core/Handle.h>
#include <omni/inspect/IInspector.h>
namespace omni {
namespace graph {
namespace core {
/**
* An event that occurs on the graph registry
*/
enum class IGraphRegistryEvent
{
    // Events are delivered on the stream returned by IGraphRegistry::getEventStream().

    //! Node type has been added to the registry. Event payloads are:
    //! "node_type" (std::string): Name of new node type
    eNodeTypeAdded,

    //! Node type has been removed from the registry. Event payloads are:
    //! "node_type" (std::string): Name of removed node type
    eNodeTypeRemoved,

    //! Node type has had its namespace changed. Event payloads are:
    //! "node_type" (std::string): New namespace
    //! "prev_type" (std::string): Previous namespace
    eNodeTypeNamespaceChanged,

    //! Node type has had its category changed. Event payloads are:
    //! "node_type" (std::string): Node type whose category changed
    //! "prev_value" (std::string): Previous category value
    eNodeTypeCategoryChanged
};
// ======================================================================
//! Interface that manages the registration and deregistration of node types
struct IGraphRegistry
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::IGraphRegistry", 1, 5);

    /**
     * Returns the number of registered types in the graph. This includes C++ types only.
     *
     * @return The number of registered node types
     */
    size_t(CARB_ABI* getRegisteredTypesCount)();

    /**
     * Gets the list of the registered types in the graph. This includes C++ types only.
     *
     * @param[out] typesBuf Buffer to hold the return array of node type objects
     * @param[in] bufferSize the number of NodeTypeObj structures the buffer is able to hold
     * @return true on success, false on failure
     */
    bool(CARB_ABI* getRegisteredTypes)(NodeTypeObj* typesBuf, size_t bufferSize);

    /**
     * Gets the version of the registered node type. This includes both C++ and Python types.
     *
     * @param[in] nodeType the name of the node type in question.
     * @return the version number of the currently registered type. If the type is not found
     *         returns the default version number, which is 0
     */
    int(CARB_ABI* getNodeTypeVersion)(const char* nodeType);

    /**
     * Registers a node type as defined above with the system
     * Deprecated. Use registerNodeTypeInterface instead.
     *
     * @param[in] desc Reference to the node type interface (the underlying object is not yet available here)
     * @param[in] version Version of the node interface to be registered
     */
    CARB_DEPRECATED("Will be removed in next major version, use IGraphRegistry::registerNodeTypeInterface instead")
    void(CARB_ABI* registerNodeType)(const INodeType& desc, int version);

    /**
     * Unregisters a node type interface as defined above with the system
     *
     * @param[in] nodeType Name of the node type to be unregistered
     */
    void(CARB_ABI* unregisterNodeType)(const char* nodeType);

    /**
     * Registers an alias by which a node type can be referred to. Useful for backward compatibility in files.
     *
     * @param[in] desc Reference to the node type interface
     * @param[in] alias Alternate name that can be used to refer to the node type when creating
     */
    CARB_DEPRECATED("Will be removed in next major version, use INodeTypeForwarding instead")
    void(CARB_ABI* registerNodeTypeAlias)(const INodeType& desc, const char* alias);

    /**
     * Runs the inspector on the contents of the graph registry.
     *
     * @param[in] inspector The inspector class
     * @return true if the inspection ran successfully, false if the inspection type is not supported
     */
    bool(CARB_ABI* inspect)(inspect::IInspector* inspector);

    /**
     * Gets the node type information corresponding to the node type name. This includes aliases.
     *
     * @param[in] nodeTypeName The name (or alias) of the node type to look up
     * @return the node type object corresponding to the given name
     */
    NodeTypeObj(CARB_ABI* getRegisteredType)(const char* nodeTypeName);

    /**
     * Returns the interface for the event stream for the changes on the graph registry
     *
     * The events that are raised are specified by IGraphRegistryEvent
     *
     * @return the event stream interface that pumps events
     */
    carb::events::IEventStreamPtr(CARB_ABI* getEventStream)();

    /**
     * Registers a node type as defined above with the system
     *
     * @param[in] desc Reference to the node type interface (the underlying object is not yet available here)
     * @param[in] version Version of the node interface to be registered
     * @param[in] nodeTypeStructSize The size of the INodeType struct being passed. Use sizeof(INodeType).
     */
    void(CARB_ABI* registerNodeTypeInterface)(const INodeType& desc, int version, size_t nodeTypeStructSize);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IGraphRegistry, registerNodeTypeInterface, 9)
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/ISchedulingHints.gen.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Interface to the list of scheduling hints that can be applied to a node type
// NOTE: this class is emitted by omni.bind (see the generated-file warning in
// the header above); regenerate rather than hand-editing the declarations.
template <>
class omni::core::Generated<omni::graph::core::ISchedulingHints_abi> : public omni::graph::core::ISchedulingHints_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::ISchedulingHints")

    /**
     * Get the threadSafety status (i.e. can be run in parallel with other nodes)
     *
     * @returns Is the node compute threadsafe?
     */
    omni::graph::core::eThreadSafety getThreadSafety() noexcept;

    /**
     * Set the flag indicating if a node is threadsafe or not.
     *
     * @param[in] newThreadSafety New value of the threadsafe flag
     */
    void setThreadSafety(omni::graph::core::eThreadSafety newThreadSafety) noexcept;

    /**
     * Get the type of access the node has for a given data type
     *
     * @param[in] dataType Type of data for which access type is being modified
     * @returns Value of the access type flag
     */
    omni::graph::core::eAccessType getDataAccess(omni::graph::core::eAccessLocation dataType) noexcept;

    /**
     * Set the flag describing how a node accesses particular data in its compute _abi (defaults to no access).
     * Setting any of these flags will, in most cases, automatically mark the node as "not threadsafe".
     * One current exception to this is allowing a node to be both threadsafe and a writer to USD, since
     * such behavior can be achieved if delayed writebacks (e.g. "registerForUSDWriteBack") are utilized
     * in the node's compute method.
     *
     * @param[in] dataType Type of data for which access type is being modified
     * @param[in] newAccessType New value of the access type flag
     */
    void setDataAccess(omni::graph::core::eAccessLocation dataType, omni::graph::core::eAccessType newAccessType) noexcept;

    /**
     * Get the flag describing the compute rule which may be followed by the evaluator.
     *
     * @returns Value of the ComputeRule flag
     */
    omni::graph::core::eComputeRule getComputeRule() noexcept;

    /**
     * Set the flag describing the compute rule which may be followed by the evaluator.
     *
     * @param[in] newComputeRule New value of the ComputeRule flag
     */
    void setComputeRule(omni::graph::core::eComputeRule newComputeRule) noexcept;

    /**
     * Runs the inspector on the scheduling hints.
     *
     * @param[in] inspector The inspector class
     * @return true if the inspection ran successfully, false if the inspection type is not supported
     */
    bool inspect(omni::core::ObjectParam<omni::inspect::IInspector> inspector) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
// Inline thunks emitted by omni.bind: each wrapper forwards directly to the
// corresponding *_abi entry point with no additional logic. Hand edits will be
// lost when this file is regenerated (see the generated-file warning above).
inline omni::graph::core::eThreadSafety omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::getThreadSafety() noexcept
{
    return getThreadSafety_abi();
}

inline void omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::setThreadSafety(
    omni::graph::core::eThreadSafety newThreadSafety) noexcept
{
    setThreadSafety_abi(newThreadSafety);
}

inline omni::graph::core::eAccessType omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::getDataAccess(
    omni::graph::core::eAccessLocation dataType) noexcept
{
    return getDataAccess_abi(dataType);
}

inline void omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::setDataAccess(
    omni::graph::core::eAccessLocation dataType, omni::graph::core::eAccessType newAccessType) noexcept
{
    setDataAccess_abi(dataType, newAccessType);
}

inline omni::graph::core::eComputeRule omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::getComputeRule() noexcept
{
    return getComputeRule_abi();
}

inline void omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::setComputeRule(
    omni::graph::core::eComputeRule newComputeRule) noexcept
{
    setComputeRule_abi(newComputeRule);
}

inline bool omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::inspect(
    omni::core::ObjectParam<omni::inspect::IInspector> inspector) noexcept
{
    return inspect_abi(inspector.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/core/OgnHelpers.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// This file contains helper functions used by the generated .ogn file code.
// You shouldn't normally have to look in here.
#include <omni/graph/core/ogn/Types.h>
#include <omni/graph/core/ogn/State.h>
#include <omni/graph/core/ogn/Database.h>
#include <omni/graph/core/ogn/AttributeInitializer.h>
#include <omni/graph/core/ogn/Registration.h>
// The fabric namespace usage is somewhat more targetted though so it can be explicit
using omni::fabric::IToken;
using omni::fabric::IPath; |
omniverse-code/kit/include/omni/graph/core/IAttributeType.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/graph/core/iComputeGraph.h>
#include <carb/Defines.h>
#include <carb/Interface.h>
#include <carb/Types.h>
#include <omni/graph/core/Handle.h>
#include <omni/graph/core/Type.h>
#include <omni/inspect/IInspector.h>
namespace omni {
namespace graph {
namespace core {
// ======================================================================
/**
* @brief Interface class managing various features of attribute types
*
*/
struct IAttributeType
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::IAttributeType", 1, 4);

    /**
     * @brief Returns an attribute type object corresponding to the OGN-style type name
     *
     * The type name is assumed to contain no whitespace for efficiency, so prune it before calling if necessary.
     *
     * @param[in] ognTypeName Attribute type name in the format used by the .ogn files
     * @return The attribute type description corresponding to the type name
     */
    Type (CARB_ABI* typeFromOgnTypeName)(const char* ognTypeName);

    /**
     * @brief Returns an attribute type object corresponding to the Sdf-style type name.
     *
     * The type name is assumed to contain no whitespace for efficiency, so prune it before calling if necessary.
     * Note that some types cannot be expressed in this form (e.g. the extended types such as "union" and "any", and
     * OGN-only types such as "bundle") so where possible use the typeFromOgnTypeName() method.
     *
     * @param[in] sdfTypeName Attribute type name in the format used by pxr::SdfValueTypeNames
     * @return The attribute type description corresponding to the type name
     */
    Type (CARB_ABI* typeFromSdfTypeName)(const char* sdfTypeName);

    /**
     * @brief Returns the size of the base data (without tuples or array counts) for the given attribute type
     *
     * @param[in] type Attribute type whose size is to be returned
     * @return Size of the base data stored by the attribute type, 0 if none is stored
     */
    size_t (CARB_ABI* baseDataSize)(Type const& type);

    /**
     * @brief Runs the inspector on the attribute data with the given type.
     *
     * @param[in] type The attribute type of the raw data
     * @param[in] data Pointer to the raw data of the given type.
     * @param[in] arrayElementCount Number of array elements in the data (1 if not an array)
     * @param[in] inspector The inspector class
     * @return true if the inspection ran successfully, false if the inspection type is not supported
     */
    bool(CARB_ABI* inspect)(Type const& type, void const* data, size_t arrayElementCount, inspect::IInspector* inspector);

    /**
     * @brief Returns the SdfValueTypeName corresponding to the given type.
     *
     * @note Not all OGN Types are fully represented in the Sdf schema since they have additional semantics in OGN
     * which do not exist in USD. In that case the SdfValueTypeName of the base type will be returned, which is
     * what is used to serialize the attribute.
     *
     * For example Type(BaseDataType::UInt64, 1, 0, AttributeRole::eObjectId) is an OGN "objectId" which will
     * return just "uint64" from this function.
     *
     * @param[in] type The Type in question
     * @return The token of the corresponding SdfTypeName
     */
    NameToken(CARB_ABI* sdfTypeNameFromType)(Type const& type);

    /**
     * @brief Checks to see if the Type passed in corresponds to a legal OGN type.
     *
     * @param[in] type Type to be checked
     * @return true if the Type can be fully represented by OGN
     * @return false if the Type does not correspond exactly to an OGN type
     */
    bool(CARB_ABI* isLegalOgnType)(Type const& type);

    /**
     * @brief Retrieves the number of attribute unions.
     *
     * @return The number of attribute union types.
     */
    size_t(CARB_ABI* getUnionTypeCount)();

    /**
     * @brief Retrieves the name of the available union types
     *
     * @param[in] buffer The array of pointers to fill in with names of the union types.
     *                   The values returned are only valid while the list of unions is not changing.
     * @param[in] bufferSize The number of entries to retrieve. Use getUnionTypeCount to retrieve the number
     *                       available.
     * @return The number of entries written to buffer.
     */
    size_t(CARB_ABI* getUnionTypes)(const char** buffer, size_t bufferSize);

    /**
     * @brief Retrieves the number of entries for the attribute union of the given name
     *
     * @param[in] unionType The name of the attribute union to retrieve.
     * @return The number of entries associated with unionType. If unionType is not valid, 0 is returned.
     */
    size_t(CARB_ABI* getUnionTypeEntryCount)(const char* unionType);

    /**
     * @brief Gets the list of ogn type names associated with an attribute union.
     *
     * The list of returned types is fully expanded. This means if an attribute union is defined in terms
     * of other attributes unions, the entries returned will have recursively expanded each entry and the
     * final list will only contain ogn type names and not other attribute unions names.
     *
     * @param[in] unionType The name of the attribute union to retrieve.
     * @param[in] buffer The array of pointers to fill in with names of the ogn types names.
     *                   The values returned are only valid while the list of unions is not changing.
     * @param[in] bufferSize The number of entries to retrieve. Use getUnionTypeEntryCount to find the maximum available.
     * @return The number of items written to buffer. If unionType is not valid, 0 is returned.
     */
    size_t(CARB_ABI* getUnionTypeEntries)(const char* unionType, const char** buffer, size_t bufferSize);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IAttributeType, getUnionTypeEntries, 9)
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/GpuArray.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <stddef.h>
#include <stdint.h>
namespace omni
{
namespace graph
{
namespace core
{
//! Device-side view of a single mutable array of T. Both members are
//! dereferenced in device code, so the accessors are only compiled under CUDA.
template <typename T>
struct GpuArray
{
    // GPU pointer to data
    T* const* gpuData;
    // GPU pointer to elem count
    const size_t* elemCount;

#ifdef __CUDACC__
    //! @return pointer to the first element of the array
    __device__ T* data()
    {
        return *gpuData;
    }
    //! @return the number of elements in the array
    __device__ size_t size() const
    {
        return *elemCount;
    }
#endif
};
//! Device-side view of multiple mutable arrays of T: parallel arrays of data
//! pointers and element counts, indexed per array. Accessors are CUDA-only.
template <typename T>
struct ArrayOfGpuArray
{
    // GPU array of GPU data pointers
    T* const* gpuData;
    // GPU array of elem counts
    const size_t* elemCount;

#ifdef __CUDACC__
    //! @param[in] i index of the array to access
    //! @return pointer to the first element of array i
    __device__ T* data(size_t i) const
    {
        return gpuData[i];
    }
    //! @param[in] i index of the array to access
    //! @return the number of elements in array i
    __device__ size_t size(size_t i) const
    {
        return elemCount[i];
    }
#endif
};
//! Device-side view of a single read-only array of T. Mirrors GpuArray but the
//! element type is const. Accessors are CUDA-only.
template <typename T>
struct ConstGpuArray
{
    // GPU pointer to data
    const T* const* gpuData;
    // GPU pointer to elem count
    const size_t* elemCount;

#ifdef __CUDACC__
    //! @return pointer to the first (read-only) element of the array
    __device__ const T* data() const
    {
        return *gpuData;
    }
    //! @return the number of elements in the array
    __device__ size_t size() const
    {
        return *elemCount;
    }
#endif
};
//! Device-side view of multiple read-only arrays of T. Mirrors ArrayOfGpuArray
//! with const element access. Accessors are CUDA-only.
template <typename T>
struct ArrayOfConstGpuArray
{
    // GPU array of GPU data pointers
    const T* const* gpuData;
    // GPU array of elem counts
    const size_t* elemCount;

#ifdef __CUDACC__
    //! @param[in] i index of the array to access
    //! @return pointer to the first (read-only) element of array i
    __device__ const T* data(size_t i) const
    {
        return gpuData[i];
    }
    //! @param[in] i index of the array to access
    //! @return the number of elements in array i
    __device__ size_t size(size_t i) const
    {
        return elemCount[i];
    }
#endif
};
}
}
}
|
omniverse-code/kit/include/omni/graph/core/ComputeGraph.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "iComputeGraph.h"
#include "INodeCategories.h"
#include "unstable/INodeTypeForwarding.h"
#include "IBundleFactory.h"
#include "IDirtyID.h"
#include "IBundleChanges.h"
#include <carb/Interface.h>
#include <carb/tasking/TaskingTypes.h>
#include <omni/core/IObject.h>
namespace rtx
{
namespace resourcemanager
{
class RpResource;
typedef uint32_t SyncScopeId;
}
}
namespace carb
{
namespace graphics
{
struct Semaphore;
}
}
namespace gpu
{
namespace rendergraph
{
//! Declare the IRenderGraph interface definition
OMNI_DECLARE_INTERFACE(IRenderGraph);
using RenderGraph = omni::core::ObjectPtr<gpu::rendergraph::IRenderGraph>;
}
enum class GfResult: int32_t;
}
namespace omni
{
namespace usd
{
class IUsdMutex;
using PathH = uint64_t;
namespace hydra
{
struct ViewportHydraRenderResults;
}
}
namespace kit
{
struct StageUpdateSettings;
}
namespace graph
{
namespace core
{
/**
 * @brief Carbonite plugin interface to the global OmniGraph compute graph system.
 *
 * Used only by kit - do not use this interface in plugins.
 *
 * NOTE: This is an ABI struct validated by STRUCT_INTEGRITY_CHECK; new function
 * pointers must be added at the end only.
 */
struct ComputeGraph
{
    CARB_PLUGIN_INTERFACE("omni::graph::core::ComputeGraph", 2, 8)
    /**
     * Returns the number of global orchestration graphs for all stages of the graph pipelines
     *
     * Note: from version 2.3 on, the semantics of this call has changed, even if the interface has not.
     * There is now a orchestration graph which has nodes that each wrap either a graph or an extension to be run
     * This function will now return the number of such orchestration graphs. The non-orchestration global graphs
     * can be retrieved by iterating over the nodes of the orchestration graph and calling getWrappedGraph
     *
     * @return the number of graphs in the system, regardless of pipeline stage
     */
    size_t(CARB_ABI* getGraphCount)();
    /**
     * Fills the buffer with global orchestration graph objects that occupy all pipeline stages
     *
     * Note: From version 2.3 on, see note above about the global orchestration graph with nodes that
     * wrap previous global graphs
     *
     * @param[out] contextBuf The buffer of graphs to fill (parameter name is historical; it receives GraphObj)
     * @param[in] bufSize The size of the buffer in terms of the number of GraphObj it has room for
     * @return true on success, false on failure
     */
    bool(CARB_ABI* getGraphs)(GraphObj* contextBuf, size_t bufSize);
    /**
     * Returns the number of graph contexts for all pipeline stages
     *
     * Note: From version 2.3 on, see note above about the global orchestration graph with nodes that
     * wrap previous global graphs
     *
     * @return the number of graph contexts in the whole system, regardless of pipeline stage
     */
    size_t(CARB_ABI* getGraphContextCount)();
    /**
     * Fills the buffer with graph context objects that occupy all pipeline stages
     *
     * Note: From version 2.3 on, see note above about the global orchestration graph with nodes
     * that wrap previous global graphs
     *
     * @param[out] contextBuf The buffer of graph contexts to fill
     * @param[in] bufSize The size of the buffer in terms of the number of GraphContextObj it has room for
     * @return true on success, false on failure
     */
    bool(CARB_ABI* getGraphContexts)(GraphContextObj* contextBuf, size_t bufSize);
    /**
     * @brief Shut down all of the compute graph infrastructure
     *
     * The functionality in this method was originally introduced because we didn't have carbOnPluginShutdown working
     * properly (it's not being called for some reason), but later we decided to keep it because DS needs more control
     * over OG's startup and shutdown.
     */
    void(CARB_ABI* shutdownComputeGraph)();
    /**
     * @brief Start up all of the compute graph infrastructure
     *
     * Needed by DS to have more control over OG startup / shutdown mechanism
     */
    void(CARB_ABI* startupComputeGraph)();
    /**
     * @brief Attach OmniGraph to the given stage
     *
     * @param[in] stageId ID of the stage to which OmniGraph should attach
     * @param[in] metersPerUnit Length units of the stage
     * @param[in] userData Extra raw data to use for and identify the attachment
     *
     */
    void(CARB_ABI* attach)(long int stageId, double metersPerUnit, void* userData);
    /**
     * @brief Detach OmniGraph using the information in the @p userData
     *
     * @param[in] userData Extra raw data to use for and identify the detachment
     *
     */
    void(CARB_ABI* detach)(void* userData);
    /**
     * @deprecated This version is deprecated and will be removed in a future version.
     * Use updateV2() instead.
     */
    void(CARB_ABI* update)(float currentTime,
                           float elapsedSecs,
                           const omni::kit::StageUpdateSettings* updateSettings,
                           void* userData);
    /**
     * @deprecated This version is deprecated and will be removed in a future version.
     * Use updateSimStepUsd() instead.
     */
    void(CARB_ABI* updateSimStep)(int64_t timeNS, carb::tasking::Counter*);
    /**
     * @brief Enable or disable the attachment of OmniGraph to the Kit update loop
     *
     * Currently there is a dependency in SimStageWithHistory on OG, so we cannot shutdown the graph
     * when it needs to be ticked from external process, such as DS2's ISimStep.
     * Using this method we can block (or unblock) OG from reacting to Kit's update loop.
     *
     * @param[in] state Whether the Kit update loop should be respected or not
     */
    void(CARB_ABI* considerKitUpdateLoop)(bool state);
    /**
     * @brief postRenderBegin is called after IHydraEngine::render() after the postRender starts
     * in order to tick graphs in the postRender pipeline stage.
     *
     * @param[in] syncScope ID for the resource manager's sync scope
     * @param[in] renderGraph Graph that will be processed in the postRender
     */
    void(CARB_ABI* postRenderBegin)(rtx::resourcemanager::SyncScopeId syncScope,
                                    gpu::rendergraph::RenderGraph renderGraph);
    /**
     * @brief postRenderUpdate is called after IHydraEngine::render() during the postRender update
     * in order to tick graphs in the postRender pipeline stage.
     *
     * @param[in] syncScope ID for the resource manager's sync scope
     * @param[in] renderResults Return value from IHydraEngine::render()
     * @param[in] renderProductPrimPath Prim path of the RenderProduct for this view
     * @param[in] simTime Kit's simulation time that's passed to ComputeGraphImpl::updateV2()
     * @param[in] hydraTime The current time value of the USD Stage
     */
    void(CARB_ABI* postRenderUpdate)(rtx::resourcemanager::SyncScopeId syncScope,
                                     omni::usd::hydra::ViewportHydraRenderResults* renderResults,
                                     omni::usd::PathH renderProductPrimPath,
                                     double simTime,
                                     double hydraTime);
    /**
     * @brief postRenderEnd is called after IHydraEngine::render() after the postRender ends
     * in order to tick graphs in the postRender pipeline stage.
     *
     * @param[in] syncScope ID for the resource manager's sync scope
     * @return The status of the postRender
     */
    gpu::GfResult(CARB_ABI* postRenderEnd)(rtx::resourcemanager::SyncScopeId syncScope);
    /**
     * Returns the number of global orchestration graphs given a particular graph pipeline stage.
     *
     * @param[in] pipelineStage The stage of the pipeline (simulation, pre-render, post-render)
     * @return the number of graphs in that pipeline stage
     */
    size_t(CARB_ABI* getGraphCountInPipelineStage)(GraphPipelineStage pipelineStage);
    /**
     * Fills the buffer with global orchestration graph objects that occupy a particular pipeline stage
     *
     * @param[out] graphObjs The buffer of graphs to fill
     * @param[in] bufSize The size of the buffer in terms of the number of GraphObj it has room for
     * @param[in] pipelineStage The stage of the pipeline (simulation, pre-render, post-render)
     * @return true on success, false on failure
     */
    bool(CARB_ABI* getGraphsInPipelineStage)(GraphObj* graphObjs, size_t bufSize, GraphPipelineStage pipelineStage);
    /**
     * @brief This update function is used by DriveSim's ISimStep interface to tick the graph
     *
     * @param[in] timeNS Simulation time, in nanoseconds
     * @param[in] counter Tasking counter
     * @param[in] disableUsdUpdates Turn off USD updates while the simulation step is happening
     */
    void(CARB_ABI* updateSimStepUsd)(int64_t timeNS, carb::tasking::Counter* counter, bool disableUsdUpdates);
    /**
     * @brief Gets the interface object handling the node categories
     *
     * @return Raw object that implements the node category interface
     */
    INodeCategories*(CARB_ABI* getNodeCategoriesInterface)();
    /**
     * @brief Gets an ONI object for the interface handling the node categories
     *
     * @return Shared object that implements the node category interface
     */
    inline omni::core::ObjectPtr<INodeCategories> getNodeCategoriesInterfacePtr() const
    {
        return omni::core::steal(getNodeCategoriesInterface());
    }
    /**
     * @brief Sets the test failure state. This is for test failures that cannot be caught by conventional means.
     *
     * @param[in] hasFailed If true then increment the test failure count, otherwise reset it to 0.
     */
    void (CARB_ABI* setTestFailure)(bool hasFailed);
    /**
     * @return Returns the current test failure count since it was last cleared.
     */
    size_t (CARB_ABI* testFailureCount)();
    /**
     * @brief Gets the interface object handling the bundle factory
     *
     * @return Raw object that implements the bundle factory interface
     */
    IBundleFactory*(CARB_ABI* getBundleFactoryInterface)();
    /**
     * @brief Gets an ONI object for the interface handling the bundle factory
     *
     * @return Shared object that implements the bundle factory interface
     */
    inline omni::core::ObjectPtr<IBundleFactory> getBundleFactoryInterfacePtr() const
    {
        return omni::core::steal(getBundleFactoryInterface());
    }
    /**
     * @brief Updates the graphs
     *
     * @param[in] currentTime Time at which the graphs are being updated
     * @param[in] elapsedSecs Amount of time elapsed in overall graph updates
     * @param[in] absoluteSimTime Time at which the simulation graph updates
     * @param[in] updateSettings Any settings required by the update
     * @param[in] userData Raw user data to pass to the update
     */
    void(CARB_ABI* updateV2)(double currentTime,
                             float elapsedSecs,
                             double absoluteSimTime,
                             const omni::kit::StageUpdateSettings* updateSettings,
                             void* userData);
    /**
     * @brief preRenderBegin is called before IHydraEngine::render() before the preRender starts
     * in order to tick graphs in the prerender pipeline stage.
     *
     * @param[in] renderGraph Render graph that is about to be ticked
     */
    void(CARB_ABI* preRenderBegin)(gpu::rendergraph::RenderGraph renderGraph);
    /**
     * @brief preRenderUpdate is called before IHydraEngine::render() when the preRender updates
     * in order to tick graphs in the prerender pipeline stage.
     *
     * @param[in] simTime Time at which the render graph is being updated according to the simulation graph
     * @param[in] hydraTime Time at which the render graph is being updated according to Hydra
     * @param[in] mutex Mutex for locking USD while updating
     */
    void(CARB_ABI* preRenderUpdate)(double simTime,
                                    double hydraTime,
                                    omni::usd::IUsdMutex& mutex);
    /**
     * @brief preRenderEnd is called before IHydraEngine::render() after the preRender is done
     * in order to tick graphs in the prerender pipeline stage.
     */
    void(CARB_ABI* preRenderEnd)();
    /**
     * Flushes any pending USD changes from the fabric scene delegate.
     */
    void(CARB_ABI* flushUsd)();
    /**
     * @brief Gets the interface object handling the node type forwarding
     *
     * @return Raw object that implements the node type forwarding interface
     */
    unstable::INodeTypeForwarding*(CARB_ABI* getNodeTypeForwardingInterface)();
    /**
     * @brief Gets an ONI object for the interface handling the node type forwarding
     *
     * @return Shared object that implements the node type forwarding interface
     */
    inline omni::core::ObjectPtr<unstable::INodeTypeForwarding> getNodeTypeForwardingInterfacePtr() const
    {
        return omni::core::steal(getNodeTypeForwardingInterface());
    }
    /**
     * @brief Gets the interface object handling dirty IDs for the given graph context
     *
     * @param[in] context The graph context whose dirty ID interface is requested
     * @return Raw object that implements the dirty ID interface
     */
    unstable::IDirtyID2*(CARB_ABI* getDirtyIDInterface)(GraphContextObj const& context);
    /**
     * @brief Gets an ONI object for the interface handling dirty IDs for the given graph context
     *
     * @param[in] context The graph context whose dirty ID interface is requested
     * @return Shared object that implements the dirty ID interface
     */
    omni::core::ObjectPtr<unstable::IDirtyID2> getDirtyIDInterfacePtr(GraphContextObj const& context) const
    {
        return omni::core::steal(getDirtyIDInterface(context));
    }
    /**
     * @brief Gets the interface object handling bundle changes for the given graph context
     *
     * @param[in] context The graph context whose bundle changes interface is requested
     * @return Raw object that implements the bundle changes interface
     */
    IBundleChanges*(CARB_ABI* getBundleChangesInterface)(GraphContextObj const& context);
    /**
     * @brief Gets an ONI object for the interface handling bundle changes for the given graph context
     *
     * @param[in] context The graph context whose bundle changes interface is requested
     * @return Shared object that implements the bundle changes interface
     */
    omni::core::ObjectPtr<IBundleChanges> getBundleChangesInterfacePtr(GraphContextObj const& context) const
    {
        return omni::core::steal(getBundleChangesInterface(context));
    }
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(ComputeGraph, getBundleChangesInterface, 28)
}
}
}
|
omniverse-code/kit/include/omni/graph/core/PreUsdInclude.h | // Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// This file should be included in conjunction with PostUsdInclude.h when including any of the individual USD
// definition files. They have a number of warnings that are triggered by our default build configuration that these
// files silence as being third party includes we can't fix them ourselves.
//
// It includes a special ifdef detection to prevent inclusion of this file without PostUsdInclude.h as that
// would have unpredictable effects on the compiler. The usual "#pragma once" is omitted, so that the mechanism works
// correctly even with multiple uses. It is not legal to include anything other than USD headers between these two.
//
// Here is an example of how you use this mechanism to include the definition of the USD type pxr::GfHalf:
//
// #include <omni/graph/core/PreUsdInclude.h>
// #include <pxr/base/gf/half.h>
// #include <omni/graph/core/PostUsdInclude.h>
//
// NOTE: At some point when USD is upgraded these might no longer be required; at that time they can be deprecated
//
#ifdef _MSC_VER
// MSVC: push the warning state and silence warnings the USD headers trigger under our
// default build configuration; the matching PostUsdInclude.h is expected to restore it.
#    pragma warning(push)
#    pragma warning(disable : 4244) // = Conversion from double to float / int to float
#    pragma warning(disable : 4267) // conversion from size_t to int
#    pragma warning(disable : 4305) // argument truncation from double to float
#    pragma warning(disable : 4800) // int to bool
#    pragma warning(disable : 4996) // call to std::copy with parameters that may be unsafe
#    pragma warning(disable : 4003) // not enough arguments for function-like macro invocation
#    define NOMINMAX // Make sure nobody #defines min or max
#elif defined(__GNUC__)
// GCC/Clang: push the diagnostic state and silence warnings the USD headers trigger;
// the matching PostUsdInclude.h is expected to restore it.
#    pragma GCC diagnostic push
#    pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#    pragma GCC diagnostic ignored "-Wunused-local-typedefs"
#    pragma GCC diagnostic ignored "-Wunused-function"
#    pragma GCC diagnostic ignored "-Wunused-variable"
// This suppresses deprecated header warnings, which is impossible with pragmas.
// Alternative is to specify -Wno-deprecated build option, but that disables other useful warnings too.
// OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS records that __DEPRECATED was undefined here so
// it can be restored later.
#    ifdef __DEPRECATED
#        define OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
#        undef __DEPRECATED
#    endif
#endif
// Pairing guard: if this macro is already defined then PreUsdInclude.h was included twice
// without an intervening PostUsdInclude.h, which is an error (see the header comment above).
#ifdef __USD_INCLUDE_PROTECTION__
#    error "You must include PostUsdInclude.h after including PreUsdInclude.h"
#else
#    define __USD_INCLUDE_PROTECTION__
#endif
|
omniverse-code/kit/include/omni/graph/core/PyINodeCategories.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
//! Creates the Python bindings for omni::graph::core::INodeCategories.
//! (Build-system generated file - do not hand-edit the binding code.)
//! @param[in] m Python module into which the classes are bound
//! @return The bound pybind11 class object for INodeCategories
auto bindINodeCategories(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    // Bind the generated ABI wrapper as a private parent class ("_INodeCategories")
    py::class_<omni::core::Generated<omni::graph::core::INodeCategories_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::INodeCategories_abi>>,
               omni::core::IObject>
        clsParent(m, "_INodeCategories");
    // Bind the public interface class, derived from the generated wrapper above
    py::class_<omni::graph::core::INodeCategories, omni::core::Generated<omni::graph::core::INodeCategories_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::core::INodeCategories>, omni::core::IObject>
        cls(m, "INodeCategories",
            R"OMNI_BIND_RAW_(Interface to the list of categories that a node type can belong to )OMNI_BIND_RAW_");
    // Constructor that casts a generic IObject to INodeCategories, raising on type mismatch
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::INodeCategories>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));
    // Default constructor that instantiates the registered INodeCategories implementation
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::INodeCategories>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::INodeCategories instantiation");
            }
            return tmp;
        }));
    cls.def_property_readonly("category_count", &omni::graph::core::INodeCategories::getCategoryCount);
    cls.def("define_category",
            [](omni::graph::core::INodeCategories* self, const char* categoryName, const char* categoryDescription)
            {
                auto return_value = self->defineCategory(categoryName, categoryDescription);
                return return_value;
            },
            R"OMNI_BIND_RAW_(Define a new category
@param[in] categoryName Name of the new category
@param[in] categoryDescription Description of the category
@return false if there was already a category with the given name)OMNI_BIND_RAW_",
            py::arg("category_name"), py::arg("category_description"));
    cls.def("remove_category",
            [](omni::graph::core::INodeCategories* self, const char* categoryName)
            {
                auto return_value = self->removeCategory(categoryName);
                return return_value;
            },
            R"OMNI_BIND_RAW_(Remove an existing category, mainly to manage the ones created by a node type for itself
@param[in] categoryName Name of the category to remove
@return false if there was no category with the given name)OMNI_BIND_RAW_",
            py::arg("category_name"));
    return omni::python::PyBind<omni::graph::core::INodeCategories>::bind(cls);
}
|
omniverse-code/kit/include/omni/graph/core/IBundle.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "bundle/IBundle2.h"
|
omniverse-code/kit/include/omni/graph/core/SlangScript.h | // Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Framework.h>
#include <carb/graphics/Graphics.h>
#include <omni/graph/core/PreUsdInclude.h>
#include <pxr/base/tf/token.h>
#include <pxr/base/tf/type.h>
#include <pxr/usd/sdf/path.h>
#include <omni/graph/core/PostUsdInclude.h>
#include <slang/slang.h>
#include <slang/slang-com-ptr.h>
#define SLANG_PRELUDE_NAMESPACE CPPPrelude
#include <string>
#include <vector>
#include <slang/prelude/slang-cpp-types.h>
namespace omni
{
namespace graph
{
namespace core
{
//! A Slang script compiled for execution on either the CPU or the GPU.
struct SlangScript
{
    // A ResizeSpec allows the user to set (using USD) the size of an output
    // array to the size of an input array
    struct ResizeSpec
    {
        std::string outputArray; //!< Name of the output array to be resized
        std::string inputArray; //!< Name of the input array whose size is used
    };
    //! Output-to-input array size mappings to be applied for this script
    std::vector<ResizeSpec> resizeSpec;
    // Whether to run on CPU or GPU
    gpucompute::Target target;
    // Compiler output (shader is owned by this script and destroyed via `compiler`)
    gpucompute::Shader* shader = nullptr;
    gpucompute::ComputeCompiler* compiler = nullptr;
    //! Compiles @p codeString for the given @p target, recording the resize specifications
    SlangScript(const char* codeString,
                gpucompute::Target target,
                const std::vector<ResizeSpec>& resizeSpec,
                carb::graphics::Device* device);
    // NOTE(review): this struct owns `shader` but does not delete its copy operations;
    // copying a SlangScript would destroy the same shader twice. Consider deleting
    // copy construction/assignment in a follow-up (interface-affecting, so not done here).
    ~SlangScript()
    {
        // Guard both pointers: `compiler` defaults to nullptr, so a partially
        // initialized instance must not be dereferenced during destruction.
        if (shader && compiler)
            compiler->destroyShader(*shader);
    }
};
}
}
}
|
omniverse-code/kit/include/omni/graph/core/Handle.h | // Copyright (c) 2021-2023 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/fabric/IPath.h>
#include <omni/fabric/IToken.h>
#include <omni/fabric/IFabric.h>
#include <omni/graph/core/TemplateUtils.h>
// Support for generic handles for interface objects.
// Ideally each of the handle types would be put into interface files for their corresponding types. In the
// current scheme they are too intertwined to separate them cleanly. Having this file provides a way for code
// to have access to handles for passing around without pulling in all of the unrelated interfaces.
namespace omni {
namespace graph {
namespace core {
/**
 * Macro to validate the structure of the interface definitions. New functions must be added at the end,
 * and the struct itself must be a standard layout POD, as per Carbonite requirements.
 *
 * The checks assume every ABI member is a function pointer (all the size of void(*)()): the last
 * function must sit at offset NumberOfFunctions * sizeof(pointer), and the struct must be exactly
 * one pointer larger than that. NumberOfFunctions is therefore the zero-based index of
 * @p LastFunction, i.e. the total number of ABI functions minus one.
 *
 * @param[in] StructName Name of the structure being validated
 * @param[in] LastFunction Name of the last function declared in the structure
 * @param[in] NumberOfFunctions Zero-based index of @p LastFunction within the structure
 */
#define STRUCT_INTEGRITY_CHECK(StructName, LastFunction, NumberOfFunctions) \
    static_assert(offsetof(StructName, LastFunction) == NumberOfFunctions * sizeof(void(*)()), \
                  "New " # StructName " ABI methods must be added at the end"); \
    static_assert(std::is_trivial<StructName>::value, # StructName " must be a POD"); \
    static_assert(std::is_standard_layout<StructName>::value, # StructName " must have std layout"); \
    static_assert(sizeof(StructName) == (NumberOfFunctions+1) * sizeof(void (*)()), \
                  "Please update the integrity check macro to point to the last method of " #StructName);
// ==============================================================================================================
/**
 * @brief CRTP base class providing the common behavior of OmniGraph handle types
 *
 * @tparam T Raw storage type wrapped by the handle
 * @tparam SUBCLASS Concrete handle type deriving from this base; it must supply a
 *                  static invalidValue() returning the T value reserved for "invalid"
 */
template <typename T, typename SUBCLASS>
class HandleBase
{
public:
    //! Default constructor; the stored value is whatever default-initialization of T yields
    HandleBase() = default;
    //! Copy constructor; handles copy bitwise
    HandleBase(const HandleBase&) = default;
    //! Wrap an existing raw handle value
    explicit HandleBase(const T& h) : handle(h)
    {
    }
    //! Assignment; handles copy bitwise
    HandleBase& operator=(const HandleBase&) = default;
    //! Expose the raw value underlying this handle
    explicit operator T() const
    {
        return handle;
    }
    //! A handle is valid exactly when it differs from the subclass's designated invalid value
    bool isValid() const
    {
        return handle != SUBCLASS::invalidValue();
    }
    //! Two handles are equal exactly when their raw values match
    bool operator==(HandleBase rhs) const
    {
        return handle == rhs.handle;
    }
    //! Negation of operator==
    bool operator!=(HandleBase rhs) const
    {
        return !(*this == rhs);
    }
    //! The unique invalid handle for this instantiation
    static constexpr SUBCLASS invalidHandle()
    {
        return static_cast<SUBCLASS>(SUBCLASS::invalidValue());
    }
protected:
    T handle; //!< Raw handle storage
};
//! Underlying data type for a handle being represented as an integer
using HandleInt = uint64_t;
//! Representation of a string that is referenced via a unique token. Note that although this token is
//! reference counted this is the raw value which is not. Use ogn::Token at the high level to get that
//! reference counting if you intend to hang on to copies of the token.
using NameToken = omni::fabric::TokenC;
//! Representation of a path. Note that although this path is reference counted this is the raw value
//! which is not. Use ogn::Path at the high level to get that reference counting if you intend to hang
//! on to copies of the path.
using TargetPath = omni::fabric::PathC;
//! Location of rows of data in Fabric
using BucketId = omni::fabric::BucketId;
//! Representation of an internal type used by AutoNode
using ObjectId = HandleInt;
// NOTE: Due to Linux debug linking pre-C++17 not liking constexpr static variables,
// other than built-in integer types, being passed by const reference,
// invalid values for handles are now constructed via inline functions, instead
// of using constexpr static variables. Just to play it safe, we're using a macro
// here, in case it also has an issue with built-in integer types being referenced
// from an inline function at compile-time. Note that the link errors don't show
// up until *runtime*, since they appear to be handled via dynamic linking.
//! Representation of an invalid handle as an integer (all bits set, i.e. the maximum HandleInt)
#define OG_INVALID_HANDLE_INT_VALUE (~HandleInt(0))
//! Representation of an invalid handle as an integer
constexpr static HandleInt kInvalidHandleIntValue = OG_INVALID_HANDLE_INT_VALUE;
// ==============================================================================================================
//! Handle type representing attributes, which require two parts to be valid
using AttrKey = std::pair<HandleInt, HandleInt>;
//! Hash functor so that AttrKey and ConstAttributeDataHandle can be used as std::unordered_map keys.
//! ConstAttributeDataHandleHash below intentionally produces the same value, as both represent the same data.
//!
//! See https://www.techiedelight.com/use-std-pair-key-std-unordered_map-cpp/ for a full discussion.
struct AttrKeyHash
{
    //! Combine the hashes of the two halves of the key into a single hash value
    std::size_t operator()(const AttrKey& attrKey) const
    {
        const std::size_t firstHash = std::hash<HandleInt>()(attrKey.first);
        const std::size_t secondHash = std::hash<HandleInt>()(attrKey.second);
        return firstHash ^ secondHash;
    }
};
// ==============================================================================================================
//! Object representing a handle to a constant AttributeData type (see AttributeDataHandle for the
//! mutable variant, which implicitly converts to this type)
class ConstAttributeDataHandle : public HandleBase<AttrKey, ConstAttributeDataHandle>
{
public:
    using HandleBase<AttrKey, ConstAttributeDataHandle>::HandleBase;
    //! Path to the prim or bundle of the attribute, e.g. in "/world/cube.size", this returns "/world/cube"
    omni::fabric::PathC path() const noexcept { return handle.first; }
    //! Name of the attribute, e.g. in "/world/cube.size", this would be "size"
    omni::fabric::TokenC name() const noexcept { return handle.second; }
    //! Returns an invalid AttributeData handle value (both halves set to Fabric's uninitialized values)
    static constexpr AttrKey invalidValue()
    {
        return std::make_pair(HandleInt(omni::fabric::kUninitializedPath.path),
                              HandleInt(omni::fabric::kUninitializedToken.token));
    }
};
// --------------------------------------------------------------------------------------------------------------
//! Hash definition so that AttributeDataHandle can be used in a map
struct ConstAttributeDataHandleHash
{
//! Returns a hash value unique for AttributeData handles
std::size_t operator()(const ConstAttributeDataHandle& attrDataHandle) const
{
AttrKey attrKey(attrDataHandle);
return AttrKeyHash()(attrKey);
}
};
// --------------------------------------------------------------------------------------------------------------
//! Object representing a handle to a variable AttributeData type; implicitly convertible to the
//! read-only ConstAttributeDataHandle pointing at the same data
class AttributeDataHandle : public HandleBase<AttrKey, AttributeDataHandle>
{
public:
    using HandleBase<AttrKey, AttributeDataHandle>::HandleBase;
    //! Returns an invalid AttributeData handle value (both halves set to Fabric's uninitialized values)
    static constexpr AttrKey invalidValue()
    {
        return std::make_pair(HandleInt(omni::fabric::kUninitializedPath.path),
                              HandleInt(omni::fabric::kUninitializedToken.token));
    }
    //! Path to the prim or bundle of the attribute, e.g. in "/world/cube.size", this returns "/world/cube"
    omni::fabric::PathC path() const noexcept { return handle.first; }
    //! Name of the attribute, e.g. in "/world/cube.size", this would be "size"
    omni::fabric::TokenC name() const noexcept { return handle.second; }
    //! Returns a constant AttributeDataHandle pointing to the same AttributeData as this variable one
    operator ConstAttributeDataHandle() const
    {
        return ConstAttributeDataHandle(AttrKey(*this));
    }
};
// ==============================================================================================================
//! Object representing a handle to a constant OmniGraph Bundle (see BundleHandle for the mutable variant)
class ConstBundleHandle : public HandleBase<HandleInt, ConstBundleHandle>
{
public:
    using HandleBase<HandleInt, ConstBundleHandle>::HandleBase;
    //! Returns an invalid Bundle handle value (Fabric's uninitialized path)
    static constexpr HandleInt invalidValue()
    {
        return omni::fabric::kUninitializedPath.path;
    }
};
// --------------------------------------------------------------------------------------------------------------
//! Hash definition so that BundleHandle can be used in a map
struct ConstBundleHandleHash
{
//! Returns a hash value unique for Bundle handles
std::size_t operator()(const ConstBundleHandle& handle) const
{
return std::hash<HandleInt>()(HandleInt(handle));
}
};
// --------------------------------------------------------------------------------------------------------------
//! Object representing a handle to an OmniGraph Bundle; implicitly convertible to the read-only
//! ConstBundleHandle pointing at the same bundle
class BundleHandle : public HandleBase<HandleInt, BundleHandle>
{
public:
    using HandleBase<HandleInt, BundleHandle>::HandleBase;
    //! Returns an invalid Bundle handle value (Fabric's uninitialized path)
    static constexpr HandleInt invalidValue()
    {
        return omni::fabric::kUninitializedPath.path;
    }
    //! Returns a constant BundleHandle pointing to the same Bundle as this variable one
    operator ConstBundleHandle() const
    {
        return ConstBundleHandle(HandleInt(*this));
    }
};
//! Deprecated - for backward compatibility only
using ConstPrimHandle [[deprecated("Use ConstBundleHandle!")]] = ConstBundleHandle;
//! Deprecated - for backward compatibility only
using ConstPrimHandleHash [[deprecated("Use ConstBundleHandleHash!")]] = ConstBundleHandleHash;
//! Deprecated - for backward compatibility only
using PrimHandle [[deprecated("Use BundleHandle!")]] = BundleHandle;
// ==============================================================================================================
//! Object representing a handle to an OmniGraph NodeContext
class NodeContextHandle : public HandleBase<HandleInt, NodeContextHandle>
{
public:
    using HandleBase<HandleInt, NodeContextHandle>::HandleBase;
    //! Returns an invalid NodeContext handle value (all bits set, unlike the Fabric-path-based handles above)
    static constexpr HandleInt invalidValue()
    {
        return kInvalidHandleIntValue;
    }
};
// ======================================================================
// Support for attributes
using AttributeHandle = uint64_t; //!< Handle to an OmniGraph Attribute
using AttributeHash = uint64_t; //!< Hash value type for OmniGraph Attributes
static constexpr AttributeHandle kInvalidAttributeHandle = 0; //!< Constant representing an invalid attribute handle
struct IAttribute;
//! Object representing an OmniGraph Attribute, pairing the ABI interface with an opaque handle
struct AttributeObj
{
    const IAttribute* iAttribute; //!< Interface to functionality on the attribute
    AttributeHandle attributeHandle; //!< Opaque handle to actual underlying attribute
    //! Returns true if this object refers to a valid attribute (only the handle is checked)
    bool isValid() const
    {
        return (attributeHandle != kInvalidAttributeHandle);
    }
};
// ======================================================================
// Support for node types
using NodeTypeHandle = uint64_t; //!< Handle to an OmniGraph NodeType
static constexpr NodeTypeHandle kInvalidNodeTypeHandle = 0; //!< Constant representing an invalid node type handle
struct INodeType;
//! Object representing an OmniGraph NodeType, pairing the ABI interface with an opaque handle
struct NodeTypeObj
{
    const INodeType* iNodeType; //!< Interface to functionality on the node type
    NodeTypeHandle nodeTypeHandle; //!< Opaque handle to actual underlying node type - managed by OmniGraph
    //! Returns true if this object refers to a valid node type (only the handle is checked)
    bool isValid() const
    {
        return (nodeTypeHandle != kInvalidNodeTypeHandle);
    }
};
// ======================================================================
// Support for evaluation contexts
struct IGraphContext;
struct IBundle;
struct IAttributeData;
using GraphContextHandle = uint64_t; //!< Handle to an OmniGraph GraphContext
static constexpr GraphContextHandle kInvalidGraphContextHandle = 0; //!< Constant representing an invalid graph context handle
/**
 * @brief Object representing an OmniGraph GraphContext
 *
 * Besides the context's own interface this caches the interface pointers most commonly
 * used alongside a context, so callers don't have to look them up separately.
 */
struct GraphContextObj
{
    const IGraphContext* iContext; //!< Interfaces to functionality on the context
    // Convenience location for commonly used interfaces
    const IBundle* iBundle; //!< Cached ABI interface pointer
    const IAttributeData* iAttributeData; //!< Cached ABI interface pointer
    const omni::fabric::IToken* iToken; //!< Cached ABI interface pointer
    const omni::fabric::IPath* iPath; //!< Cached ABI interface pointer
    GraphContextHandle contextHandle; //!< Opaque handle to actual underlying graph context
    //! Returns true if this object refers to a valid graph context (only the handle is checked)
    bool isValid() const
    {
        return (contextHandle != kInvalidGraphContextHandle);
    }
};
// ======================================================================
// Support for nodes
using NodeHandle = uint64_t; //!< Handle to an OmniGraph Node
static constexpr NodeHandle kInvalidNodeHandle = 0; //!< Constant representing an invalid node handle
struct INode;
//! Object representing an OmniGraph Node
struct NodeObj
{
    //! Interface to functionality on the node
    const INode* iNode{ nullptr };
    //! Opaque handle to actual underlying node - managed by compute graph system
    NodeHandle nodeHandle{ kInvalidNodeHandle };
    //! handle used to retrieve data on the node - every node has a NodeContextHandle, but not the other way around
    NodeContextHandle nodeContextHandle{ NodeContextHandle::invalidValue() };
    //! Returns true if this object refers to a valid node (only nodeHandle is checked, not the data handle)
    bool isValid() const
    {
        return (nodeHandle != kInvalidNodeHandle);
    }
};
// ======================================================================
// Support for graphs
using GraphHandle = uint64_t; //!< Handle to an OmniGraph Graph
static constexpr GraphHandle kInvalidGraphHandle = 0; //!< Constant representing an invalid graph handle
struct IGraph;
//! Pairing of a graph's ABI interface with the opaque handle identifying the underlying graph
struct GraphObj
{
    IGraph* iGraph; //!< Interface to functionality on the graph
    GraphHandle graphHandle; //!< Opaque handle to actual underlying graph
    //! @return true when the wrapped handle identifies a valid graph
    bool isValid() const
    {
        return kInvalidGraphHandle != graphHandle;
    }
};
// ======================================================================
// Support for schedule nodes
using ScheduleNodeHandle = uint64_t; //!< Handle to an OmniGraph ScheduleNode
static constexpr ScheduleNodeHandle kInvalidScheduleNodeHandle = 0; //!< Constant representing an invalid schedule node handle
struct IScheduleNode;
//! Pairing of a schedule node's ABI interface with the opaque handle identifying the underlying schedule node
struct ScheduleNodeObj
{
    const IScheduleNode* iScheduleNode; //!< Interface to functionality on the schedule node
    ScheduleNodeHandle scheduleNodeHandle; //!< Opaque handle to actual underlying schedule node
    //! @return true when the wrapped handle identifies a valid schedule node
    bool isValid() const
    {
        return kInvalidScheduleNodeHandle != scheduleNodeHandle;
    }
};
// ======================================================================
/**
 * Detection alias (for use with is_detected) that is well-formed only when NodeTypeClass has a
 * method callable as "void setContext(const GraphContextObj&)".
 * This allows setting a context in the wrapper functions to percolate down to the member implementations
 * when appropriate.
 *
 * Usage:
 *     OptionalMethod::setContext<ClassType>(classMember, context);
 */
template <class NodeTypeClass>
using has_setContext = typename std::is_same<void,
                                             decltype(std::declval<NodeTypeClass&>().setContext(
                                                 std::declval<const GraphContextObj&>()))>::value_type;
/**
 * Detection alias (for use with is_detected) that is well-formed only when NodeTypeClass has a
 * method callable as "void setHandle(HandleType)".
 * This allows setting a handle in the wrapper functions to percolate down to the member implementations
 * when appropriate.
 *
 * Usage:
 *     OptionalMethod::setHandle<ClassType, HandleType>(classMember, handle);
 */
template <class NodeTypeClass, typename HandleType>
using has_setHandle = typename std::is_same<void,
                                            decltype(std::declval<NodeTypeClass&>().setHandle(
                                                std::declval<HandleType>()))>::value_type;
//! Helper struct to make it easy to reference methods on a class that may or may not be defined.
struct OptionalMethod
{
private:
    // Overload selected when ClassToSet::setHandle(HandleType) exists - forwards the call.
    template <typename ClassToSet, typename HandleType>
    static void call_setHandle(ClassToSet& obj, HandleType handleValue, std::true_type)
    {
        obj.setHandle(handleValue);
    }
    // Overload selected when no matching setHandle exists - intentionally does nothing.
    template <typename ClassToSet, typename HandleType>
    static void call_setHandle(ClassToSet&, HandleType, std::false_type)
    {
    }
    // Overload selected when ClassToSet::setContext(const GraphContextObj&) exists - forwards the call.
    template <typename ClassToSet>
    static void call_setContext(ClassToSet& obj, const GraphContextObj& contextValue, std::true_type)
    {
        obj.setContext(contextValue);
    }
    // Overload selected when no matching setContext exists - intentionally does nothing.
    template <typename ClassToSet>
    static void call_setContext(ClassToSet&, const GraphContextObj&, std::false_type)
    {
    }
public:
    /**
     * @brief Set the Handle object, when the class provides a setHandle implementation
     *
     * @tparam ClassToSet Object class on which to set the handle
     * @tparam HandleType Handle class that is the object class member containing the handle
     * @param member Object on which the handle is to be set
     * @param handle Handle to be set on the object
     */
    template <typename ClassToSet, typename HandleType>
    static void setHandle(ClassToSet& member, HandleType handle)
    {
        // Tag-dispatch on whether the method is detected; the false_type overload is a no-op.
        call_setHandle(member, handle, is_detected<has_setHandle, ClassToSet, HandleType>());
    }
    /**
     * @brief Set the Context object, if an implementation exists
     *
     * @tparam ClassToSet Object class on which to set the context
     * @param member Object on which the context is to be set
     * @param context Context to be set on the object
     */
    template <typename ClassToSet>
    static void setContext(ClassToSet& member, const GraphContextObj& context)
    {
        // Tag-dispatch on whether the method is detected; the false_type overload is a no-op.
        call_setContext(member, context, is_detected<has_setContext, ClassToSet>());
    }
};
/*
_____ _ _
| __ \ | | | |
| | | | ___ _ __ _ __ ___ ___ __ _| |_ ___ __| |
| | | |/ _ \ '_ \| '__/ _ \/ __/ _` | __/ _ \/ _` |
| |__| | __/ |_) | | | __/ (_| (_| | || __/ (_| |
|_____/ \___| .__/|_| \___|\___\__,_|\__\___|\__,_|
| |
|_|
May go away at any time - what you should use are in the comments
*/
//! Deprecated - use kInvalidTokenValue instead; retained only for source compatibility and may be removed at any time
constexpr static HandleInt INVALID_TOKEN_VALUE = ~HandleInt(0);
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/Accessors.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/graph/core/CppWrappers.h>
#include <omni/graph/core/CudaUtils.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/tuple.h>
#include <omni/graph/core/Handle.h>
namespace omni
{
namespace graph
{
namespace core
{
/**
 * Retrieves the Nth upstream connected attribute, invalid object if it isn't there. This is a utility function
 * that lets callers avoid the hassle of going through the allocation/deallocation required for ABI access.
 *
 * @tparam Count Zero-based index of the upstream attribute to retrieve (must be non-negative)
 * @param[in] attrObj The attribute object for which to retrieve the connection
 * @return Nth upstream attribute, or an invalid AttributeObj if there is none
 */
template <int Count>
AttributeObj getNthUpstreamAttribute(const AttributeObj& attrObj)
{
    static_assert(Count >= 0, "Upstream attribute index must be non-negative");
    const size_t connectionCount = attrObj.iAttribute->getUpstreamConnectionCount(attrObj);
    // Cast avoids a signed/unsigned comparison; Count is guaranteed non-negative by the static_assert.
    if (connectionCount > static_cast<size_t>(Count))
    {
        // The temporary connection list lives on the stack; connection counts are expected to be small.
        AttributeObj* attrObjs = reinterpret_cast<AttributeObj*>(alloca(sizeof(AttributeObj) * connectionCount));
        attrObj.iAttribute->getUpstreamConnections(attrObj, attrObjs, connectionCount);
        AttributeObj toReturn = attrObjs[Count];
        return toReturn;
    }
    return AttributeObj{nullptr, kInvalidAttributeHandle};
}
/**
 * Retrieves the Nth upstream attribute if it exists, or the passed-in attribute if not.
 *
 * @tparam Count Zero-based index of the upstream attribute to retrieve
 * @param[in] attrObj The attribute object for which to retrieve the connection
 * @return Nth upstream attribute, or the passed-in attribute if there is none
 */
template <int Count>
AttributeObj getNthUpstreamAttributeOrSelf(const AttributeObj& attrObj)
{
    const AttributeObj upstream = getNthUpstreamAttribute<Count>(attrObj);
    if (upstream.iAttribute)
    {
        return upstream;
    }
    return attrObj;
}
//! Maps a C++ scalar type to the corresponding BaseDataType enumerant.
//! Only int, int64_t, float and double are supported; any other type yields BaseDataType::eUnknown.
template <typename T>
constexpr BaseDataType baseDataTypeForType()
{
    // Strip const/volatile so that e.g. "const float" maps the same as "float".
    using Type = typename std::remove_cv<T>::type;
    if (std::is_same<Type, int>::value)
    {
        return BaseDataType::eInt;
    }
    if (std::is_same<Type, int64_t>::value)
    {
        return BaseDataType::eInt64;
    }
    if (std::is_same<Type, float>::value)
    {
        return BaseDataType::eFloat;
    }
    if (std::is_same<Type, double>::value)
    {
        return BaseDataType::eDouble;
    }
    // NOTE(review): the assert condition is a string literal, which is always truthy, so this can
    // never fire. It was presumably meant to be CUDA_SAFE_ASSERT(false && "Type not implemented"),
    // but that would turn constexpr evaluation for unsupported types into a compile error, so the
    // change needs a deliberate decision - confirm intent before fixing.
    CUDA_SAFE_ASSERT("Type not implemented");
    return BaseDataType::eUnknown;
}
//! @return true when @p type is one of the numeric base data types handled by the accessors below
//! (int, int64, float or double); false for every other base type.
constexpr bool isNumericBaseType(BaseDataType type)
{
    return type == BaseDataType::eInt || type == BaseDataType::eInt64 || type == BaseDataType::eFloat ||
           type == BaseDataType::eDouble;
}
// Scalar numeric attribute accessor.
//
// Presents a numeric attribute (base type int, int64_t, float or double, with arrayDepth < 2) as
// values of type T, converting element-by-element in get()/set() when the stored base type differs
// from T. A const T makes the accessor read-only. Instances are populated by the getNumericAccessor*
// free functions later in this file.
template <typename T>
class NumericAccessor
{
public:
    //! BaseDataType corresponding exactly to T; when it matches the attribute, no conversion occurs.
    static constexpr BaseDataType expectedType = baseDataTypeForType<T>();
    //! True when T is const, i.e. the accessor cannot write back to the attribute.
    static constexpr bool readOnly = std::is_const<T>::value;
    //! T with const/volatile stripped.
    using RawType = typename std::remove_cv<T>::type;
    //! Handle flavor matching the accessor's mutability.
    using HandleType = typename std::conditional<readOnly, ConstAttributeDataHandle, AttributeDataHandle>::type;
    //! Constructs an empty, invalid accessor.
    NumericAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), componentCount(0), elementCount(0)
    {
    }
    NumericAccessor(const NumericAccessor&) = default;
    NumericAccessor& operator=(const NumericAccessor&) = default;
    // NOTE(review): the handle- and bundle-based constructors below are compiled out; the
    // getNumericAccessor* free functions later in this file perform the equivalent setup.
#if 0
    // The enable_if makes this valid only if T is const, since a non-const accessor can't
    // be initialized with a const attribute handle, but a const accessor can be initialized
    // from a const or non-const attribute handle.
    template <typename HANDLE_TYPE,
              typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstAttributeDataHandle>::value ||
                                                                      std::is_same<HANDLE_TYPE, AttributeDataHandle>::value),
                                                         int>::type>
    NumericAccessor(const GraphContextObj& context,
                    HANDLE_TYPE attributeHandle,
                    IGNORED = 0)
        : NumericAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        BaseDataType baseType = type.baseType;
        if (isNumericBaseType(baseType))
        {
            size_t depth = type.arrayDepth;
            if (depth < 2)
            {
                attributeType = baseType;
                const void* pData;
                context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1);
                data = (depth == 0) ? pData : (*(const void* const*)pData);
                componentCount = type.componentCount;
                elementCount = omni::graph::core::getElementCount(context, attributeHandle);
            }
        }
    }
    // The enable_if makes this valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    NumericAccessor(const GraphContextObj& context,
                    AttributeDataHandle attributeHandle,
                    IGNORED = 0)
        : NumericAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        BaseDataType baseType = type.baseType;
        if (isNumericBaseType(baseType))
        {
            size_t depth = type.arrayDepth;
            if (depth < 2)
            {
                attributeType = baseType;
                void* pData;
                context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1);
                data = (depth == 0) ? pData : (*(void* const*)pData);
                componentCount = type.componentCount;
                elementCount = omni::graph::core::getElementCount(context, attributeHandle);
            }
        }
    }
    // Construct a NumericAccessor by prim and attribute name.
    //
    // The enable_if makes this valid only if T is const, since a non-const accessor can't
    // be initialized with a const attribute handle, but a const accessor can be initialized
    // from a const or non-const attribute handle.
    template <typename HANDLE_TYPE,
              typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value ||
                                                                      std::is_same<HANDLE_TYPE, BundleHandle>::value),
                                                         int>::type>
    NumericAccessor(const GraphContextObj& context,
                    HANDLE_TYPE primHandle,
                    Token attributeName,
                    IGNORED = 0)
        : NumericAccessor()
    {
        if (!primHandle.isValid())
            return;
        ConstAttributeDataHandle attributeHandle;
        NameToken nameToken(attributeName);
        context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &nameToken, 1);
        *this = NumericAccessor(context, attributeHandle);
    }
    // Construct a NumericAccessor by non-const prim and attribute name.
    //
    // The enable_if makes this valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    NumericAccessor(const GraphContextObj& context,
                    BundleHandle primHandle,
                    NameToken attributeName,
                    IGNORED = 0)
        : NumericAccessor()
    {
        if (!primHandle.isValid())
            return;
        AttributeDataHandle attributeHandle;
        context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &attributeName, 1);
        *this = NumericAccessor(context, attributeHandle);
    }
#endif
    //! @return true when the accessor is bound to attribute data
    bool isValid() const
    {
        return (data != nullptr);
    }
    //! @return number of components per element (e.g. 3 for a float3 attribute)
    size_t getComponentCount() const
    {
        return componentCount;
    }
    //! @return number of elements (1 for a scalar attribute, the array length for depth-1 arrays)
    size_t getElementCount() const
    {
        return elementCount;
    }
    // Returns a pointer to the data if it's a perfect type match, else nullptr.
    T* getPerfectMatch() const
    {
        return reinterpret_cast<T*>((attributeType == expectedType) ? data : nullptr);
    }
    //! Reads component i (flat index over componentCount * elementCount scalars), converting from
    //! the stored base type to RawType when they differ.
    CUDA_CALLABLE
    RawType get(size_t i = 0) const
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        CUDA_SAFE_ASSERT(i < componentCount * elementCount);
        if (attributeType == expectedType)
        {
            return reinterpret_cast<const RawType*>(data)[i];
        }
        // Type mismatch: read via the attribute's actual base type and convert.
        switch (attributeType)
        {
            case BaseDataType::eInt:
                return RawType(reinterpret_cast<const int*>(data)[i]);
            case BaseDataType::eInt64:
                return RawType(reinterpret_cast<const int64_t*>(data)[i]);
            case BaseDataType::eFloat:
                return RawType(reinterpret_cast<const float*>(data)[i]);
            case BaseDataType::eDouble:
                return RawType(reinterpret_cast<const double*>(data)[i]);
        }
        CUDA_SAFE_ASSERT(0);
        return RawType(0);
    }
    //! Writes component i (flat index over componentCount * elementCount scalars), converting
    //! RawType to the stored base type when they differ.
    // The enable_if makes this valid only if T is non-const.
    template <bool IsEnabled = true, typename std::enable_if<(IsEnabled && !readOnly), int>::type = 0>
    CUDA_CALLABLE void set(RawType value, size_t i = 0) const
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        CUDA_SAFE_ASSERT(i < componentCount * elementCount);
        if (attributeType == expectedType)
        {
            reinterpret_cast<RawType*>(data)[i] = value;
        }
        else
        {
            // Type mismatch: convert and write via the attribute's actual base type.
            switch (attributeType)
            {
                case BaseDataType::eInt:
                    reinterpret_cast<int*>(data)[i] = int(value);
                    break;
                case BaseDataType::eInt64:
                    reinterpret_cast<int64_t*>(data)[i] = int64_t(value);
                    break;
                case BaseDataType::eFloat:
                    reinterpret_cast<float*>(data)[i] = float(value);
                    break;
                case BaseDataType::eDouble:
                    reinterpret_cast<double*>(data)[i] = double(value);
                    break;
                default:
                    CUDA_SAFE_ASSERT(0);
                    break;
            }
        }
    }
    // TODO: make getNumericAccessor functions be friends of this class and make data private again
    // private:
    BaseDataType attributeType; //!< Base type actually stored in the attribute (may differ from expectedType)
    using VoidType = typename std::conditional<readOnly, const void, void>::type;
    VoidType* data; //!< Raw pointer into the attribute's storage (nullptr when invalid)
    size_t componentCount; //!< Components per element
    size_t elementCount; //!< Number of elements
};
// CPU accessors
// When the dest is const, source can either be ConstBundleHandle or BundleHandle.
// Looks up the named attribute on the prim and wraps its CPU data in a read-only NumericAccessor<T>;
// an invalid (empty) accessor is returned whenever any lookup or type check fails.
template <typename T,
          typename HANDLE_TYPE,
          typename IGNORED =
              typename std::enable_if<std::is_const<T>::value && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value ||
                                                                  std::is_same<HANDLE_TYPE, BundleHandle>::value),
                                      int>::type>
NumericAccessor<T> getNumericAccessor(const GraphContextObj& context,
                                      HANDLE_TYPE primHandle,
                                      Token attributeName,
                                      IGNORED = 0)
{
    NumericAccessor<T> result;
    if (!primHandle.isValid())
        return result;
    ConstAttributeDataHandle attrHandle;
    NameToken nameToken(attributeName);
    context.iBundle->getAttributesByNameR(&attrHandle, context, primHandle, &nameToken, 1);
    if (!attrHandle.isValid())
        return result;
    const Type type = context.iAttributeData->getType(context, attrHandle);
    const BaseDataType baseType = type.baseType;
    if (!isNumericBaseType(baseType))
        return result;
    const size_t depth = type.arrayDepth;
    if (depth >= 2)
        return result;
    result.attributeType = baseType;
    const void* rawData;
    context.iAttributeData->getDataR(&rawData, context, &attrHandle, 1);
    // For array attributes (depth == 1) the returned pointer points at the array pointer.
    result.data = (depth == 0) ? rawData : (*(const void* const*)rawData);
    result.componentCount = type.componentCount;
    result.elementCount = omni::graph::core::getElementCount(context, attrHandle);
    return result;
}
// When the dest is non-const, source can only be BundleHandle.
// Looks up the named attribute on the prim and wraps its writable CPU data in a NumericAccessor<T>;
// an invalid (empty) accessor is returned whenever any lookup or type check fails.
template <typename T, typename IGNORED = typename std::enable_if<!std::is_const<T>::value, int>::type>
NumericAccessor<T> getNumericAccessor(const GraphContextObj& context,
                                      BundleHandle primHandle,
                                      Token attributeName,
                                      IGNORED = 0)
{
    NumericAccessor<T> result;
    if (!primHandle.isValid())
        return result;
    AttributeDataHandle attrHandle;
    NameToken nameToken(attributeName);
    context.iBundle->getAttributesByNameW(&attrHandle, context, primHandle, &nameToken, 1);
    if (!attrHandle.isValid())
        return result;
    const Type type = context.iAttributeData->getType(context, attrHandle);
    const BaseDataType baseType = type.baseType;
    if (!isNumericBaseType(baseType))
        return result;
    const size_t depth = type.arrayDepth;
    if (depth >= 2)
        return result;
    result.attributeType = baseType;
    void* rawData;
    context.iAttributeData->getDataW(&rawData, context, &attrHandle, 1);
    // For array attributes (depth == 1) the returned pointer points at the array pointer.
    result.data = (depth == 0) ? rawData : (*(void* const*)rawData);
    result.componentCount = type.componentCount;
    result.elementCount = omni::graph::core::getElementCount(context, attrHandle);
    return result;
}
// GPU accessors
// When the dest is const, source can either be ConstBundleHandle or BundleHandle.
// Same as the CPU variant above but fetches the attribute's GPU-side data pointer via getDataRGPU.
template <typename T,
          typename HANDLE_TYPE,
          typename IGNORED =
              typename std::enable_if<std::is_const<T>::value && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value ||
                                                                  std::is_same<HANDLE_TYPE, BundleHandle>::value),
                                      int>::type>
NumericAccessor<T> getNumericAccessorGPU(const GraphContextObj& context, HANDLE_TYPE primHandle, Token attributeName)
{
    NumericAccessor<T> result;
    if (!primHandle.isValid())
        return result;
    ConstAttributeDataHandle attrHandle;
    NameToken nameToken(attributeName);
    context.iBundle->getAttributesByNameR(&attrHandle, context, primHandle, &nameToken, 1);
    if (!attrHandle.isValid())
        return result;
    const Type type = context.iAttributeData->getType(context, attrHandle);
    const BaseDataType baseType = type.baseType;
    if (!isNumericBaseType(baseType))
        return result;
    const size_t depth = type.arrayDepth;
    if (depth >= 2)
        return result;
    result.attributeType = baseType;
    const void* rawData;
    context.iAttributeData->getDataRGPU(&rawData, context, &attrHandle, 1);
    // For array attributes (depth == 1) the returned pointer points at the array pointer.
    result.data = (depth == 0) ? rawData : (*(const void* const*)rawData);
    result.componentCount = type.componentCount;
    result.elementCount = omni::graph::core::getElementCount(context, attrHandle);
    return result;
}
// When the dest is non-const, source can only be BundleHandle.
// Same as the CPU variant above but fetches the attribute's GPU-side data pointer via getDataWGPU.
template <typename T, typename IGNORED = typename std::enable_if<!std::is_const<T>::value, int>::type>
NumericAccessor<T> getNumericAccessorGPU(const GraphContextObj& context,
                                         BundleHandle primHandle,
                                         Token attributeName,
                                         IGNORED = 0)
{
    NumericAccessor<T> result;
    if (!primHandle.isValid())
        return result;
    AttributeDataHandle attrHandle;
    NameToken nameToken(attributeName);
    context.iBundle->getAttributesByNameW(&attrHandle, context, primHandle, &nameToken, 1);
    if (!attrHandle.isValid())
        return result;
    const Type type = context.iAttributeData->getType(context, attrHandle);
    const BaseDataType baseType = type.baseType;
    if (!isNumericBaseType(baseType))
        return result;
    const size_t depth = type.arrayDepth;
    if (depth >= 2)
        return result;
    result.attributeType = baseType;
    void* rawData;
    context.iAttributeData->getDataWGPU(&rawData, context, &attrHandle, 1);
    // For array attributes (depth == 1) the returned pointer points at the array pointer.
    result.data = (depth == 0) ? rawData : (*(void* const*)rawData);
    result.componentCount = type.componentCount;
    result.elementCount = omni::graph::core::getElementCount(context, attrHandle);
    return result;
}
// Fixed-size vector (N-component tuple) attribute accessor.
//
// Like NumericAccessor but every indexed access reads or writes a whole N-component tuple;
// get()/set() convert per-tuple when the stored base type differs from T. A const T makes the
// accessor read-only.
template <typename T, size_t N>
class VectorAccessor
{
public:
    //! BaseDataType corresponding exactly to T; when it matches the attribute, no conversion occurs.
    static constexpr BaseDataType expectedType = baseDataTypeForType<T>();
    //! Number of scalar components per element.
    static constexpr size_t componentCount = N;
    //! True when T is const, i.e. the accessor cannot write back to the attribute.
    static constexpr bool readOnly = std::is_const<T>::value;
    using RawBaseType = typename std::remove_cv<T>::type;
    using RawVectorType = tuple<RawBaseType, N>;
    using VectorType = typename std::conditional<readOnly, const tuple<RawBaseType, N>, tuple<RawBaseType, N>>::type;
    using VoidType = typename std::conditional<readOnly, const void, void>::type;
    using HandleType = typename std::conditional<readOnly, ConstAttributeDataHandle, AttributeDataHandle>::type;
    //! Constructs an empty, invalid accessor.
    VectorAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), elementCount(0)
    {
    }
    VectorAccessor(const VectorAccessor&) = default;
    VectorAccessor& operator=(const VectorAccessor&) = default;
    // The enable_if makes this valid only if T is const, since a non-const accessor can't
    // be initialized with a const attribute handle, but a const accessor can be initialized
    // from a const or non-const attribute handle.
    template <typename HANDLE_TYPE,
              typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstAttributeDataHandle>::value ||
                                                                      std::is_same<HANDLE_TYPE, AttributeDataHandle>::value),
                                                         int>::type>
    VectorAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle, IGNORED = 0) : VectorAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        size_t componentCount = type.componentCount;
        // Only bind when the attribute's component count matches N exactly.
        if (componentCount == N)
        {
            BaseDataType baseType = type.baseType;
            if (isNumericBaseType(baseType))
            {
                size_t depth = type.arrayDepth;
                if (depth < 2)
                {
                    attributeType = baseType;
                    const void* pData;
                    context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1);
                    // For array attributes (depth == 1) the raw pointer points at the array pointer.
                    data = (depth == 0) ? pData : (*(const void* const*)pData);
                    elementCount = omni::graph::core::getElementCount(context, attributeHandle);
                }
            }
        }
    }
    // The enable_if makes this valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    VectorAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle, IGNORED = 0) : VectorAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        size_t componentCount = type.componentCount;
        // Only bind when the attribute's component count matches N exactly.
        if (componentCount == N)
        {
            BaseDataType baseType = type.baseType;
            if (isNumericBaseType(baseType))
            {
                size_t depth = type.arrayDepth;
                if (depth < 2)
                {
                    attributeType = baseType;
                    void* pData;
                    context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1);
                    // For array attributes (depth == 1) the raw pointer points at the array pointer.
                    data = (depth == 0) ? pData : (*(void* const*)pData);
                    elementCount = omni::graph::core::getElementCount(context, attributeHandle);
                }
            }
        }
    }
    // Construct a VectorAccessor by prim and attribute name.
    //
    // The enable_if makes this valid only if T is const, since a non-const accessor can't
    // be initialized with a const attribute handle, but a const accessor can be initialized
    // from a const or non-const attribute handle.
    template <typename HANDLE_TYPE,
              typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value ||
                                                                      std::is_same<HANDLE_TYPE, BundleHandle>::value),
                                                         int>::type>
    VectorAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, NameToken attributeName, IGNORED = 0)
        : VectorAccessor()
    {
        if (!primHandle.isValid())
            return;
        ConstAttributeDataHandle attributeHandle;
        context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &attributeName, 1);
        *this = VectorAccessor(context, attributeHandle);
    }
    // Construct a VectorAccessor by non-const prim and attribute name.
    //
    // The enable_if makes this valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    VectorAccessor(const GraphContextObj& context, BundleHandle primHandle, NameToken attributeName, IGNORED = 0)
        : VectorAccessor()
    {
        if (!primHandle.isValid())
            return;
        AttributeDataHandle attributeHandle;
        context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &attributeName, 1);
        *this = VectorAccessor(context, attributeHandle);
    }
    //! @return true when the accessor is bound to attribute data
    bool isValid() const
    {
        return (data != nullptr);
    }
    //! @return number of N-component elements
    size_t getElementCount() const
    {
        return elementCount;
    }
    // Returns a pointer to the data if it's a perfect type match, else nullptr.
    VectorType* getPerfectMatch() const
    {
        return reinterpret_cast<VectorType*>((attributeType == expectedType) ? data : nullptr);
    }
    //! Reads element i (one whole N-component tuple), converting from the stored base type when needed.
    RawVectorType get(size_t i = 0) const
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        CUDA_SAFE_ASSERT(i < elementCount);
        if (attributeType == expectedType)
        {
            return reinterpret_cast<const RawVectorType*>(data)[i];
        }
        switch (attributeType)
        {
            case BaseDataType::eInt:
                return RawVectorType(reinterpret_cast<const tuple<int, N>*>(data)[i]);
            case BaseDataType::eInt64:
                return RawVectorType(reinterpret_cast<const tuple<int64_t, N>*>(data)[i]);
            case BaseDataType::eFloat:
                return RawVectorType(reinterpret_cast<const tuple<float, N>*>(data)[i]);
            case BaseDataType::eDouble:
                return RawVectorType(reinterpret_cast<const tuple<double, N>*>(data)[i]);
        }
        CUDA_SAFE_ASSERT(0);
        return RawVectorType(T(0));
    }
    //! Writes element i (one whole N-component tuple), converting to the stored base type when needed.
    // The enable_if makes this valid only if T is non-const.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    void set(const RawVectorType& value, size_t i = 0, IGNORED = 0) const
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        // i indexes whole N-component elements, exactly as in get(); the previous bound of
        // componentCount * elementCount was too lax by a factor of N and accepted out-of-bounds writes.
        CUDA_SAFE_ASSERT(i < elementCount);
        if (attributeType == expectedType)
        {
            reinterpret_cast<RawVectorType*>(data)[i] = value;
        }
        else
        {
            switch (attributeType)
            {
                case BaseDataType::eInt:
                    reinterpret_cast<tuple<int, N>*>(data)[i] = tuple<int, N>(value);
                    break;
                case BaseDataType::eInt64:
                    reinterpret_cast<tuple<int64_t, N>*>(data)[i] = tuple<int64_t, N>(value);
                    break;
                case BaseDataType::eFloat:
                    reinterpret_cast<tuple<float, N>*>(data)[i] = tuple<float, N>(value);
                    break;
                case BaseDataType::eDouble:
                    reinterpret_cast<tuple<double, N>*>(data)[i] = tuple<double, N>(value);
                    break;
                default:
                    CUDA_SAFE_ASSERT(0);
                    break;
            }
        }
    }
private:
    BaseDataType attributeType; //!< Base type actually stored in the attribute (may differ from expectedType)
    VoidType* data; //!< Raw pointer into the attribute's storage (nullptr when invalid)
    size_t elementCount; //!< Number of N-component elements
};
// Bulk scalar numeric array attribute accessor (non-const implementation).
//
// Unlike NumericAccessor, which converts per access, this accessor converts the whole attribute
// into a contiguous buffer of T up front (only when the stored base type differs from T), exposes
// that buffer through getData(), and writes modifications back to the attribute in flush() and
// the destructor.
template <typename T>
class BulkNumericAccessor
{
public:
    //! BaseDataType corresponding exactly to T; a match means no conversion buffer is needed.
    static constexpr BaseDataType expectedType = baseDataTypeForType<T>();
    //! This is the writable (non-const) implementation.
    static constexpr bool readOnly = false;
    using RawType = typename std::remove_cv<T>::type;
    using HandleType = AttributeDataHandle;
    //! Constructs an empty, invalid accessor.
    BulkNumericAccessor()
        : attributeType(BaseDataType::eUnknown), data(nullptr), matchingData(nullptr), componentCount(0), elementCount(0)
    {
    }
    //! Move constructor - steals the state (and conversion buffer, if any) from "that".
    BulkNumericAccessor(BulkNumericAccessor&& that) noexcept
    {
        stealFrom(that);
    }
    //! Move assignment - flushes and frees any conversion buffer this accessor already owns,
    //! then steals the state from "that".
    BulkNumericAccessor& operator=(BulkNumericAccessor&& that) noexcept
    {
        if (this != &that)
        {
            // Without this release, a previously owned conversion buffer would leak and its
            // pending converted writes would silently be dropped.
            releaseConverted();
            stealFrom(that);
        }
        return *this;
    }
    BulkNumericAccessor(const BulkNumericAccessor& that) = delete;
    BulkNumericAccessor& operator=(const BulkNumericAccessor& that) = delete;
    // This is valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    BulkNumericAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle) : BulkNumericAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        BaseDataType baseType = type.baseType;
        if (isNumericBaseType(baseType))
        {
            size_t depth = type.arrayDepth;
            if (depth < 2)
            {
                attributeType = baseType;
                void* pData;
                context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1);
                // For array attributes (depth == 1) the raw pointer points at the array pointer.
                data = (depth == 0) ? pData : (*(void* const*)pData);
                componentCount = type.componentCount;
                elementCount = omni::graph::core::getElementCount(context, attributeHandle);
                if (attributeType != expectedType)
                {
                    // Base type mismatch: build a converted working copy; it is written back
                    // to the attribute by flush()/the destructor.
                    size_t fullCount = componentCount * elementCount;
                    RawType* converted = new RawType[fullCount];
                    convertToMatching(converted, fullCount);
                    matchingData = converted;
                }
                else
                {
                    matchingData = reinterpret_cast<T*>(data);
                }
            }
        }
    }
    // Construct a BulkNumericAccessor by non-const prim and attribute name.
    //
    // This is valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    BulkNumericAccessor(const GraphContextObj& context, BundleHandle primHandle, NameToken attributeName)
        : BulkNumericAccessor()
    {
        if (!primHandle.isValid())
            return;
        AttributeDataHandle attributeHandle;
        context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &attributeName, 1);
        *this = BulkNumericAccessor(context, attributeHandle);
    }
    //! Writes any pending converted values back to the attribute and frees the conversion buffer.
    ~BulkNumericAccessor()
    {
        releaseConverted();
    }
    //! @return true when the accessor is bound to attribute data
    bool isValid() const
    {
        return (data != nullptr);
    }
    //! @return number of components per element
    size_t getComponentCount() const
    {
        return componentCount;
    }
    //! @return number of elements
    size_t getElementCount() const
    {
        return elementCount;
    }
    // Returns a pointer to the data if it's convertible, else nullptr.
    T* getData() const
    {
        return matchingData;
    }
    // Returns a pointer to the data if it's convertible, else nullptr.
    // Convertible types are always treated as a perfect match for bulk conversion.
    // This function is just provided for compatibility with the NumericAccessor
    // and VectorAccessor classes above.
    T* getPerfectMatch() const
    {
        return matchingData;
    }
    //! Explicitly writes the converted working copy back to the attribute (no-op on a type match).
    void flush()
    {
        flushInternal(matchingData);
    }
private:
    //! Copies all state from "that" and resets "that" to the empty state.
    //! Precondition: this accessor does not currently own a conversion buffer.
    void stealFrom(BulkNumericAccessor& that) noexcept
    {
        attributeType = that.attributeType;
        data = that.data;
        matchingData = that.matchingData;
        componentCount = that.componentCount;
        elementCount = that.elementCount;
        that.attributeType = BaseDataType::eUnknown;
        that.data = nullptr;
        that.matchingData = nullptr;
        that.componentCount = 0;
        that.elementCount = 0;
    }
    //! Flushes pending converted writes back to the attribute and frees the conversion buffer,
    //! when one is owned (i.e. the base types differ). Safe to call repeatedly.
    void releaseConverted()
    {
        if (attributeType != expectedType && matchingData != nullptr)
        {
            flushInternal(matchingData);
            delete[] matchingData;
            matchingData = nullptr;
        }
    }
    //! Converts the working copy back into the attribute's actual base type, in place in the
    //! attribute's storage. No-op when the base types match.
    void flushInternal(RawType* matchData)
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        if (attributeType == expectedType)
        {
            return;
        }
        const size_t fullCount = componentCount * elementCount;
        switch (attributeType)
        {
            case BaseDataType::eInt:
            {
                int* source = reinterpret_cast<int*>(data);
                for (size_t i = 0; i < fullCount; ++i)
                {
                    source[i] = int(matchData[i]);
                }
                break;
            }
            case BaseDataType::eInt64:
            {
                int64_t* source = reinterpret_cast<int64_t*>(data);
                for (size_t i = 0; i < fullCount; ++i)
                {
                    source[i] = int64_t(matchData[i]);
                }
                break;
            }
            case BaseDataType::eFloat:
            {
                float* source = reinterpret_cast<float*>(data);
                for (size_t i = 0; i < fullCount; ++i)
                {
                    source[i] = float(matchData[i]);
                }
                break;
            }
            case BaseDataType::eDouble:
            {
                double* source = reinterpret_cast<double*>(data);
                for (size_t i = 0; i < fullCount; ++i)
                {
                    source[i] = double(matchData[i]);
                }
                break;
            }
            default:
                CUDA_SAFE_ASSERT(0);
                break;
        }
    }
    //! Fills "converted" with the attribute's values converted from its actual base type to RawType.
    void convertToMatching(RawType* converted, const size_t fullCount)
    {
        switch (attributeType)
        {
            case BaseDataType::eInt:
            {
                const int* source = reinterpret_cast<const int*>(data);
                for (size_t i = 0; i < fullCount; ++i)
                {
                    converted[i] = RawType(source[i]);
                }
                break;
            }
            case BaseDataType::eInt64:
            {
                const int64_t* source = reinterpret_cast<const int64_t*>(data);
                for (size_t i = 0; i < fullCount; ++i)
                {
                    converted[i] = RawType(source[i]);
                }
                break;
            }
            case BaseDataType::eFloat:
            {
                const float* source = reinterpret_cast<const float*>(data);
                for (size_t i = 0; i < fullCount; ++i)
                {
                    converted[i] = RawType(source[i]);
                }
                break;
            }
            case BaseDataType::eDouble:
            {
                const double* source = reinterpret_cast<const double*>(data);
                for (size_t i = 0; i < fullCount; ++i)
                {
                    converted[i] = RawType(source[i]);
                }
                break;
            }
            default:
                CUDA_SAFE_ASSERT(0);
                break;
        }
    }
    BaseDataType attributeType; //!< Base type actually stored in the attribute (may differ from expectedType)
    using VoidType = void;
    VoidType* data; //!< Raw pointer into the attribute's storage (nullptr when invalid)
    T* matchingData; //!< Data as T: either "data" itself (type match) or an owned conversion buffer
    size_t componentCount; //!< Components per element
    size_t elementCount; //!< Number of elements
};
// Bulk scalar numeric array attribute accessor (const implementation).
template <typename T>
class BulkNumericAccessor<const T>
{
public:
    static constexpr BaseDataType expectedType = baseDataTypeForType<T>();
    static constexpr bool readOnly = true;
    using RawType = typename std::remove_cv<T>::type;
    using HandleType = ConstAttributeDataHandle;

    BulkNumericAccessor()
        : attributeType(BaseDataType::eUnknown), data(nullptr), matchingData(nullptr), componentCount(0), elementCount(0)
    {
    }

    BulkNumericAccessor(BulkNumericAccessor&& that)
    {
        attributeType = that.attributeType;
        data = that.data;
        matchingData = that.matchingData;
        componentCount = that.componentCount;
        elementCount = that.elementCount;
        // Reset the source so its destructor won't free the conversion buffer we now own.
        that.attributeType = BaseDataType::eUnknown;
        that.data = nullptr;
        that.matchingData = nullptr;
        that.componentCount = 0;
        that.elementCount = 0;
    }

    BulkNumericAccessor& operator=(BulkNumericAccessor&& that)
    {
        if (this != &that)
        {
            // BUGFIX: free any conversion buffer this accessor already owns before
            // taking over that's state, otherwise it leaks.
            releaseConverted();
            attributeType = that.attributeType;
            data = that.data;
            matchingData = that.matchingData;
            componentCount = that.componentCount;
            elementCount = that.elementCount;
            that.attributeType = BaseDataType::eUnknown;
            that.data = nullptr;
            that.matchingData = nullptr;
            that.componentCount = 0;
            that.elementCount = 0;
        }
        return *this;
    }

    BulkNumericAccessor(const BulkNumericAccessor& that) = delete;
    BulkNumericAccessor& operator=(const BulkNumericAccessor& that) = delete;

    // Construct from an attribute handle. If the attribute's base type differs
    // from T, a converted copy of the data is allocated and owned by this accessor.
    //
    // HANDLE_TYPE can be ConstAttributeDataHandle or AttributeDataHandle.
    template <typename HANDLE_TYPE>
    BulkNumericAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle) : BulkNumericAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        BaseDataType baseType = type.baseType;
        if (isNumericBaseType(baseType))
        {
            size_t depth = type.arrayDepth;
            if (depth < 2)
            {
                attributeType = baseType;
                const void* pData;
                context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1);
                // depth==1 means an array attribute: pData points at the array pointer.
                data = (depth == 0) ? pData : (*(const void* const*)pData);
                componentCount = type.componentCount;
                elementCount = omni::graph::core::getElementCount(context, attributeHandle);
                if (attributeType != expectedType)
                {
                    // Type mismatch: make a converted copy. This buffer is owned
                    // by the accessor and freed in releaseConverted().
                    size_t fullCount = componentCount * elementCount;
                    RawType* converted = new RawType[fullCount];
                    convertToMatching(converted, fullCount);
                    matchingData = converted;
                }
                else
                {
                    // Perfect type match: expose the attribute data directly (not owned).
                    matchingData = reinterpret_cast<const T*>(data);
                }
            }
        }
    }

    // Construct a BulkNumericAccessor by prim and attribute name.
    //
    // HANDLE_TYPE can be ConstAttributeDataHandle or AttributeDataHandle.
    template <typename HANDLE_TYPE>
    BulkNumericAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, NameToken attributeName)
        : BulkNumericAccessor()
    {
        if (!primHandle.isValid())
            return;
        ConstAttributeDataHandle attributeHandle;
        context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &attributeName, 1);
        *this = BulkNumericAccessor(context, attributeHandle);
    }

    ~BulkNumericAccessor()
    {
        // BUGFIX: the converted copy allocated in the constructor was previously
        // leaked here (the destructor was empty; compare the const
        // BulkVectorAccessor below, which does free its buffer).
        releaseConverted();
    }

    bool isValid() const
    {
        return (data != nullptr);
    }
    size_t getComponentCount() const
    {
        return componentCount;
    }
    size_t getElementCount() const
    {
        return elementCount;
    }
    // Returns a pointer to the data if it's convertible, else nullptr.
    const T* getData() const
    {
        return matchingData;
    }
    // Returns a pointer to the data if it's convertible, else nullptr.
    // Convertible types are always treated as a perfect match for bulk conversion.
    // This function is just provided for compatibility with the NumericAccessor
    // and VectorAccessor classes above.
    const T* getPerfectMatch() const
    {
        return matchingData;
    }

private:
    // Frees the conversion buffer if (and only if) this accessor owns one.
    // When attributeType == expectedType, matchingData aliases the attribute
    // data and must not be freed.
    void releaseConverted()
    {
        if (attributeType != expectedType && matchingData != nullptr)
        {
            delete[] matchingData;
            matchingData = nullptr;
        }
    }
    // Fills the owned buffer with values converted from the attribute's stored
    // base type to T. fullCount is componentCount * elementCount scalars.
    void convertToMatching(RawType* converted, const size_t fullCount)
    {
        switch (attributeType)
        {
        case BaseDataType::eInt:
        {
            const int* source = reinterpret_cast<const int*>(data);
            for (size_t i = 0; i < fullCount; ++i)
            {
                converted[i] = RawType(source[i]);
            }
            break;
        }
        case BaseDataType::eInt64:
        {
            const int64_t* source = reinterpret_cast<const int64_t*>(data);
            for (size_t i = 0; i < fullCount; ++i)
            {
                converted[i] = RawType(source[i]);
            }
            break;
        }
        case BaseDataType::eFloat:
        {
            const float* source = reinterpret_cast<const float*>(data);
            for (size_t i = 0; i < fullCount; ++i)
            {
                converted[i] = RawType(source[i]);
            }
            break;
        }
        case BaseDataType::eDouble:
        {
            const double* source = reinterpret_cast<const double*>(data);
            for (size_t i = 0; i < fullCount; ++i)
            {
                converted[i] = RawType(source[i]);
            }
            break;
        }
        default:
            CUDA_SAFE_ASSERT(0);
            break;
        }
    }

    BaseDataType attributeType;
    using VoidType = const void;
    VoidType* data;
    const T* matchingData;
    size_t componentCount;
    size_t elementCount;
};
// Bulk vector numeric array attribute accessor (non-const implementation).
template <typename T, size_t N>
class BulkVectorAccessor
{
public:
    static constexpr BaseDataType expectedType = baseDataTypeForType<T>();
    static constexpr size_t componentCount = N;
    static constexpr bool readOnly = false;
    using RawBaseType = typename std::remove_cv<T>::type;
    using RawVectorType = tuple<RawBaseType, N>;
    using VectorType = tuple<RawBaseType, N>;
    using HandleType = AttributeDataHandle;

    BulkVectorAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), matchingData(nullptr), elementCount(0)
    {
    }

    BulkVectorAccessor(BulkVectorAccessor&& that)
    {
        attributeType = that.attributeType;
        data = that.data;
        matchingData = that.matchingData;
        elementCount = that.elementCount;
        // Reset the source so its destructor won't flush or free on our behalf.
        that.attributeType = BaseDataType::eUnknown;
        that.data = nullptr;
        that.matchingData = nullptr;
        that.elementCount = 0;
    }

    BulkVectorAccessor& operator=(BulkVectorAccessor&& that)
    {
        if (this != &that)
        {
            // BUGFIX: flush and free any conversion buffer this accessor already
            // owns before taking over that's state; previously pending writes
            // were silently dropped and the buffer leaked.
            releaseConverted();
            attributeType = that.attributeType;
            data = that.data;
            matchingData = that.matchingData;
            elementCount = that.elementCount;
            that.attributeType = BaseDataType::eUnknown;
            that.data = nullptr;
            that.matchingData = nullptr;
            that.elementCount = 0;
        }
        return *this;
    }

    BulkVectorAccessor(const BulkVectorAccessor& that) = delete;
    BulkVectorAccessor& operator=(const BulkVectorAccessor& that) = delete;

    // A non-const accessor can only be initialized with a non-const attribute handle.
    BulkVectorAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle) : BulkVectorAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        size_t typeComponentCount = type.componentCount;
        if (typeComponentCount == N)
        {
            BaseDataType baseType = type.baseType;
            if (isNumericBaseType(baseType))
            {
                size_t depth = type.arrayDepth;
                if (depth < 2)
                {
                    void* pData;
                    context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1);
                    // depth==1 means an array attribute: pData points at the array pointer.
                    data = (depth == 0) ? pData : (*(void* const*)pData);
                    attributeType = baseType;
                    elementCount = omni::graph::core::getElementCount(context, attributeHandle);
                    if (attributeType != expectedType)
                    {
                        // BUGFIX: the conversion buffer holds one tuple per array
                        // element (each tuple already contains its N components).
                        // The previous sizing of componentCount * elementCount
                        // over-allocated and made the conversion/flush loops read
                        // and write past the end of the attribute data.
                        RawVectorType* converted = new RawVectorType[elementCount];
                        convertToMatching(converted, elementCount);
                        matchingData = converted;
                    }
                    else
                    {
                        // Perfect type match: expose the attribute data directly (not owned).
                        matchingData = reinterpret_cast<VectorType*>(data);
                    }
                }
            }
        }
    }

    // Construct a BulkVectorAccessor by bundle and attribute name.
    //
    // A non-const accessor can only be initialized with a non-const attribute handle.
    BulkVectorAccessor(const GraphContextObj& context, BundleHandle bundleHandle, Token attributeName)
        : BulkVectorAccessor()
    {
        if (!bundleHandle.isValid())
            return;
        AttributeDataHandle attributeHandle;
        NameToken attributeNameToken(attributeName);
        context.iBundle->getAttributesByNameW(&attributeHandle, context, bundleHandle, &attributeNameToken, 1);
        *this = BulkVectorAccessor(context, attributeHandle);
    }

    ~BulkVectorAccessor()
    {
        // Write back any pending converted values and free the owned buffer.
        releaseConverted();
    }

    bool isValid() const
    {
        return (data != nullptr);
    }
    size_t getElementCount() const
    {
        return elementCount;
    }
    // Returns a pointer to the data if it's convertible, else nullptr.
    VectorType* getData() const
    {
        return matchingData;
    }
    // Returns a pointer to the data if it's convertible, else nullptr.
    // Convertible types are always treated as a perfect match for bulk conversion.
    // This function is just provided for compatibility with the NumericAccessor
    // and VectorAccessor classes above.
    VectorType* getPerfectMatch() const
    {
        return matchingData;
    }
    // Writes any pending converted values back to the attribute data.
    // A no-op when the attribute's base type already matches T.
    void flush()
    {
        flushInternal(matchingData);
    }

private:
    // Flushes pending writes and frees the conversion buffer, if owned.
    // When attributeType == expectedType, matchingData aliases the attribute
    // data directly and there is nothing to flush or free.
    void releaseConverted()
    {
        if (attributeType != expectedType && matchingData != nullptr)
        {
            flushInternal(matchingData);
            delete[] matchingData;
            matchingData = nullptr;
        }
    }
    // Converts each tuple of the owned buffer back to the attribute's stored
    // base type, writing into the attribute data.
    void flushInternal(RawVectorType* matchData)
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        if (attributeType == expectedType)
        {
            return;
        }
        // One tuple per array element; each tuple assignment covers all N components.
        const size_t tupleCount = elementCount;
        switch (attributeType)
        {
        case BaseDataType::eInt:
        {
            tuple<int, N>* dest = reinterpret_cast<tuple<int, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                dest[i] = tuple<int, N>(matchData[i]);
            }
            break;
        }
        case BaseDataType::eInt64:
        {
            tuple<int64_t, N>* dest = reinterpret_cast<tuple<int64_t, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                dest[i] = tuple<int64_t, N>(matchData[i]);
            }
            break;
        }
        case BaseDataType::eFloat:
        {
            tuple<float, N>* dest = reinterpret_cast<tuple<float, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                dest[i] = tuple<float, N>(matchData[i]);
            }
            break;
        }
        case BaseDataType::eDouble:
        {
            tuple<double, N>* dest = reinterpret_cast<tuple<double, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                dest[i] = tuple<double, N>(matchData[i]);
            }
            break;
        }
        default:
            CUDA_SAFE_ASSERT(0);
            break;
        }
    }
    void flushInternal(const RawVectorType* matchData)
    {
        // Does nothing. This signature is just for compiling purposes.
    }
    // Fills the owned buffer with tuples converted from the attribute's stored
    // base type to T. tupleCount is the number of array elements.
    void convertToMatching(RawVectorType* converted, const size_t tupleCount)
    {
        switch (attributeType)
        {
        case BaseDataType::eInt:
        {
            const tuple<int, N>* source = reinterpret_cast<const tuple<int, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                converted[i] = RawVectorType(source[i]);
            }
            break;
        }
        case BaseDataType::eInt64:
        {
            const tuple<int64_t, N>* source = reinterpret_cast<const tuple<int64_t, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                converted[i] = RawVectorType(source[i]);
            }
            break;
        }
        case BaseDataType::eFloat:
        {
            const tuple<float, N>* source = reinterpret_cast<const tuple<float, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                converted[i] = RawVectorType(source[i]);
            }
            break;
        }
        case BaseDataType::eDouble:
        {
            const tuple<double, N>* source = reinterpret_cast<const tuple<double, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                converted[i] = RawVectorType(source[i]);
            }
            break;
        }
        default:
            CUDA_SAFE_ASSERT(0);
            break;
        }
    }

    BaseDataType attributeType;
    using VoidType = void;
    VoidType* data;
    VectorType* matchingData;
    size_t elementCount;
};
// Bulk vector numeric array attribute accessor (const implementation).
template <typename T, size_t N>
class BulkVectorAccessor<const T, N>
{
public:
    static constexpr BaseDataType expectedType = baseDataTypeForType<T>();
    static constexpr size_t componentCount = N;
    static constexpr bool readOnly = true;
    using RawBaseType = typename std::remove_cv<T>::type;
    using RawVectorType = tuple<RawBaseType, N>;
    using VectorType = const tuple<RawBaseType, N>;
    using HandleType = ConstAttributeDataHandle;

    BulkVectorAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), matchingData(nullptr), elementCount(0)
    {
    }

    BulkVectorAccessor(BulkVectorAccessor&& that)
    {
        attributeType = that.attributeType;
        data = that.data;
        matchingData = that.matchingData;
        elementCount = that.elementCount;
        // Reset the source so its destructor won't free the conversion buffer we now own.
        that.attributeType = BaseDataType::eUnknown;
        that.data = nullptr;
        that.matchingData = nullptr;
        that.elementCount = 0;
    }

    BulkVectorAccessor& operator=(BulkVectorAccessor&& that)
    {
        if (this != &that)
        {
            // BUGFIX: free any conversion buffer this accessor already owns before
            // taking over that's state, otherwise it leaks.
            releaseConverted();
            attributeType = that.attributeType;
            data = that.data;
            matchingData = that.matchingData;
            elementCount = that.elementCount;
            that.attributeType = BaseDataType::eUnknown;
            that.data = nullptr;
            that.matchingData = nullptr;
            that.elementCount = 0;
        }
        return *this;
    }

    BulkVectorAccessor(const BulkVectorAccessor& that) = delete;
    BulkVectorAccessor& operator=(const BulkVectorAccessor& that) = delete;

    // A const accessor can be initialized from a const or non-const attribute handle.
    template <typename HANDLE_TYPE>
    BulkVectorAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle) : BulkVectorAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        size_t typeComponentCount = type.componentCount;
        if (typeComponentCount == N)
        {
            BaseDataType baseType = type.baseType;
            if (isNumericBaseType(baseType))
            {
                size_t depth = type.arrayDepth;
                if (depth < 2)
                {
                    attributeType = baseType;
                    const void* pData;
                    context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1);
                    // depth==1 means an array attribute: pData points at the array pointer.
                    data = (depth == 0) ? pData : (*(const void* const*)pData);
                    elementCount = omni::graph::core::getElementCount(context, attributeHandle);
                    if (attributeType != expectedType)
                    {
                        // BUGFIX: the conversion buffer holds one tuple per array
                        // element (each tuple already contains its N components).
                        // The previous sizing of componentCount * elementCount
                        // over-allocated and made the conversion loop read past
                        // the end of the attribute data.
                        RawVectorType* converted = new RawVectorType[elementCount];
                        convertToMatching(converted, elementCount);
                        matchingData = converted;
                    }
                    else
                    {
                        // Perfect type match: expose the attribute data directly (not owned).
                        matchingData = reinterpret_cast<VectorType*>(data);
                    }
                }
            }
        }
    }

    // Construct a BulkVectorAccessor by prim and attribute name.
    //
    // A const accessor can be initialized from a const or non-const attribute handle.
    template <typename HANDLE_TYPE>
    BulkVectorAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, Token attributeName)
        : BulkVectorAccessor()
    {
        if (!primHandle.isValid())
            return;
        ConstAttributeDataHandle attributeHandle;
        NameToken attributeNameToken(attributeName);
        context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &attributeNameToken, 1);
        *this = BulkVectorAccessor(context, attributeHandle);
    }

    ~BulkVectorAccessor()
    {
        releaseConverted();
    }

    bool isValid() const
    {
        return (data != nullptr);
    }
    size_t getElementCount() const
    {
        return elementCount;
    }
    // Returns a pointer to the data if it's convertible, else nullptr.
    VectorType* getData() const
    {
        return matchingData;
    }
    // Returns a pointer to the data if it's convertible, else nullptr.
    // Convertible types are always treated as a perfect match for bulk conversion.
    // This function is just provided for compatibility with the NumericAccessor
    // and VectorAccessor classes above.
    VectorType* getPerfectMatch() const
    {
        return matchingData;
    }

private:
    // Frees the conversion buffer if (and only if) this accessor owns one.
    // When attributeType == expectedType, matchingData aliases the attribute
    // data and must not be freed.
    void releaseConverted()
    {
        if (attributeType != expectedType && matchingData != nullptr)
        {
            delete[] matchingData;
            matchingData = nullptr;
        }
    }
    // Fills the owned buffer with tuples converted from the attribute's stored
    // base type to T. tupleCount is the number of array elements.
    void convertToMatching(RawVectorType* converted, const size_t tupleCount)
    {
        switch (attributeType)
        {
        case BaseDataType::eInt:
        {
            const tuple<int, N>* source = reinterpret_cast<const tuple<int, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                converted[i] = RawVectorType(source[i]);
            }
            break;
        }
        case BaseDataType::eInt64:
        {
            const tuple<int64_t, N>* source = reinterpret_cast<const tuple<int64_t, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                converted[i] = RawVectorType(source[i]);
            }
            break;
        }
        case BaseDataType::eFloat:
        {
            const tuple<float, N>* source = reinterpret_cast<const tuple<float, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                converted[i] = RawVectorType(source[i]);
            }
            break;
        }
        case BaseDataType::eDouble:
        {
            const tuple<double, N>* source = reinterpret_cast<const tuple<double, N>*>(data);
            for (size_t i = 0; i < tupleCount; ++i)
            {
                converted[i] = RawVectorType(source[i]);
            }
            break;
        }
        default:
            CUDA_SAFE_ASSERT(0);
            break;
        }
    }

    BaseDataType attributeType;
    using VoidType = const void;
    VoidType* data;
    VectorType* matchingData;
    size_t elementCount;
};
// Relationship attribute accessor.
// NOTE: This isn't yet supported.
#if 0
template <bool isReadOnly>
class RelationshipAccessor
{
public:
    static constexpr BaseDataType expectedType = BaseDataType::eRelationship;
    static constexpr bool readOnly = isReadOnly;
    using BundleHandleType = typename std::conditional<readOnly, ConstBundleHandle, BundleHandle>::type;
    using PrimHandleType [[deprecated("Use BundleHandleType!")]] = BundleHandleType;
    using HandleType = typename std::conditional<readOnly, ConstAttributeDataHandle, AttributeDataHandle>::type;

    RelationshipAccessor()
        : data(nullptr),
          elementCount(0),
          m_context(nullptr),
          m_attributeHandle(AttributeDataHandle::invalidValue()),
          m_iAttributeData(nullptr)
    {
    }
    // NOTE(review): copies share m_attributeHandle, so a non-const copy pair will
    // call updateRelationshipRefCounts twice on destruction — confirm intended.
    RelationshipAccessor(const RelationshipAccessor&) = default;
    RelationshipAccessor& operator=(const RelationshipAccessor&) = default;
    RelationshipAccessor(RelationshipAccessor&& that)
    {
        data = that.data;
        // BUGFIX: was "that.componentCount", which is not a member of this class.
        elementCount = that.elementCount;
        m_context = that.m_context;
        m_attributeHandle = that.m_attributeHandle;
        m_iAttributeData = that.m_iAttributeData;
        that.data = nullptr;
        that.elementCount = 0;
        that.m_context = nullptr;
        that.m_attributeHandle = AttributeDataHandle(AttributeDataHandle::invalidValue());
        that.m_iAttributeData = nullptr;
    }
    RelationshipAccessor& operator=(RelationshipAccessor&& that)
    {
        data = that.data;
        // BUGFIX: was "that.componentCount", which is not a member of this class.
        elementCount = that.elementCount;
        m_context = that.m_context;
        m_attributeHandle = that.m_attributeHandle;
        m_iAttributeData = that.m_iAttributeData;
        that.data = nullptr;
        that.elementCount = 0;
        that.m_context = nullptr;
        that.m_attributeHandle = AttributeDataHandle(AttributeDataHandle::invalidValue());
        that.m_iAttributeData = nullptr;
        // BUGFIX: the reference-returning assignment operator was missing its return.
        return *this;
    }
    // The enable_if makes this valid only if readOnly is true, since a non-const accessor can't
    // be initialized with a const attribute handle, but a const accessor can be initialized
    // from a const or non-const attribute handle.
    template <typename HANDLE_TYPE,
              typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstAttributeDataHandle>::value ||
                                                                      std::is_same<HANDLE_TYPE, AttributeDataHandle>::value),
                                                         int>::type>
    RelationshipAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle, IGNORED = 0)
        : RelationshipAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        BaseDataType baseType = type.baseType;
        if (baseType == BaseDataType::eRelationship)
        {
            size_t componentCount = type.componentCount;
            if (componentCount == 1)
            {
                size_t depth = type.arrayDepth;
                if (depth < 2)
                {
                    const void* pData;
                    context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1);
                    data = (depth == 0) ? pData : (*(const void* const*)pData);
                    elementCount = omni::graph::core::getElementCount(context, attributeHandle);
                }
            }
        }
    }
    // The enable_if makes this valid only if readOnly is false, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    RelationshipAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle, IGNORED = 0)
        : RelationshipAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        BaseDataType baseType = type.baseType;
        if (baseType == BaseDataType::eRelationship)
        {
            size_t componentCount = type.componentCount;
            if (componentCount == 1)
            {
                size_t depth = type.arrayDepth;
                if (depth < 2)
                {
                    void* pData;
                    context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1);
                    data = (depth == 0) ? pData : (*(void* const*)pData);
                    elementCount = omni::graph::core::getElementCount(context, attributeHandle);
                    // Remember the context/handle so the destructor can update
                    // relationship reference counts after writes.
                    m_context = context.context;
                    m_attributeHandle = attributeHandle;
                    m_iAttributeData = context.iAttributeData;
                }
            }
        }
    }
    // Construct a RelationshipAccessor by prim and attribute name.
    //
    // The enable_if makes this valid only if T is const, since a non-const accessor can't
    // be initialized with a const attribute handle, but a const accessor can be initialized
    // from a const or non-const attribute handle.
    template <typename HANDLE_TYPE,
              typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value ||
                                                                      std::is_same<HANDLE_TYPE, BundleHandle>::value),
                                                         int>::type>
    RelationshipAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, NameToken attributeName, IGNORED = 0)
        : RelationshipAccessor()
    {
        if (!primHandle.isValid())
            return;
        ConstAttributeDataHandle attributeHandle;
        context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &attributeName, 1);
        *this = RelationshipAccessor(context, attributeHandle);
    }
    // Construct a RelationshipAccessor by non-const prim and attribute name.
    //
    // The enable_if makes this valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    RelationshipAccessor(const GraphContextObj& context, BundleHandle primHandle, NameToken attributeName, IGNORED = 0)
        : RelationshipAccessor()
    {
        if (!primHandle.isValid())
            return;
        AttributeDataHandle attributeHandle;
        context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &attributeName, 1);
        *this = RelationshipAccessor(context, attributeHandle);
    }
    ~RelationshipAccessor()
    {
        if (m_context != nullptr && m_iAttributeData != nullptr && m_attributeHandle.isValid())
        {
            CUDA_SAFE_ASSERT(!readOnly, "Only non-const RelationshipAccessor's should update the reference counts!");
            m_iAttributeData->updateRelationshipRefCounts(*m_context, m_attributeHandle);
        }
    }
    bool isValid() const
    {
        return (data != nullptr);
    }
    size_t getElementCount() const
    {
        return elementCount;
    }
    // Returns a pointer to the data if it's a perfect type match, else nullptr.
    BundleHandleType* getData() const
    {
        return data;
    }
    BundleHandleType get(size_t i = 0) const
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        CUDA_SAFE_ASSERT(i < elementCount);
        return data[i];
    }
    // The enable_if makes this valid only if T is non-const.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    void set(BundleHandleType value, size_t i = 0, IGNORED = 0) const
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        CUDA_SAFE_ASSERT(i < elementCount);
        // FIXME: Does reference counting of the prims need to be updated now, or will it be updated later?
        data[i] = value;
    }

private:
    BundleHandleType* data;
    size_t elementCount;
    GraphContext* m_context;
    AttributeDataHandle m_attributeHandle;
    IAttributeData* m_iAttributeData;
};
#endif
}
}
}
|
omniverse-code/kit/include/omni/graph/core/tuple.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/graph/core/CudaUtils.h>
#include <carb/Defines.h>
#include <cmath>
#include <stdint.h>
#include <type_traits>
// Helper to simplify the syntax when selecting different features for PoD types.
template <class T>
struct is_trivial
{
    // Returns true iff T is a trivial (PoD-like) type.
    //
    // The previous implementation selected between two enable_if-gated
    // overloads; querying the standard trait directly is equivalent and
    // simpler. The signature is kept non-static/non-constexpr so existing
    // callers of the form is_trivial<T>().check() are unaffected.
    //
    // NOTE(review): this struct lives in the global namespace of a header and
    // shares its name with std::is_trivial; kept as-is for source
    // compatibility.
    bool check()
    {
        return std::is_trivial<T>::value;
    }
};
// This is a templated, fixed-component-count (N), vector class, intended to
// have data layout identical to a raw array of type T, so that data interchange
// wrappers can cast a T* to a tuple<T,N>* as appropriate, as long as they
// have ensured that there are at least N elements pointed-to.
//
// It provides a constructor for conversion between different T, but
// intentionally does not allow for conversion between different N.
// Conversion between different T is also explicit, to avoid accidental
// conversions. Some functionality similar to std::array<T,N> is also
// provided, and the data layout should be equivalent.
//
// For example, tuple<float,3> would be a vector of 3 floats.
//
// It also provides some convenience functions that allow for simple operations on the values
// as a single unit such as unary and binary operators.
//
namespace omni
{
namespace graph
{
namespace core
{
template <typename T, size_t N>
class tuple
{
public:
    // Some standard types, similar to std::array and std::vector.
    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using reference = value_type&;
    using const_reference = const value_type&;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using iterator = pointer;
    using const_iterator = const_pointer;

    // static constexpr variable to be able to refer to the tuple size from the type,
    // from outside this class definition.
    static constexpr size_t tuple_size = N;

    // Defaulted default constructor is needed for POD type,
    // but it cannot be constexpr, since it leaves v uninitialized.
    tuple() noexcept = default;
    constexpr tuple(const tuple<T, N>& that) noexcept = default;
    constexpr tuple& operator=(const tuple<T, N>& that) noexcept = default;
    // constexpr tuple(const tuple<T, N>&& that) noexcept = default;
    // constexpr tuple& operator=(const tuple<T, N>&& that) noexcept = default;

    // Construct a tuple with all components equal to value.
    explicit tuple(const T& value) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] = value;
        }
    }

    // Type conversion constructor is explicit, to avoid unintentional conversions.
    // Conversions are done on individual components.
    template <typename OTHER_T>
    explicit tuple(const tuple<OTHER_T, N>& that) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] = T(that[i]);
        }
    }

    // Component-wise constructor: initHelper statically enforces (via its
    // static_assert) that exactly N arguments are supplied, each converted
    // to T with an explicit T(...) conversion.
    template <typename OTHER_T, typename... Args>
    constexpr tuple(OTHER_T a, Args... args) noexcept
    {
        initHelper<0>(a, args...);
    }

    // This is a compile-time constant value
    static constexpr size_type size() noexcept
    {
        return N;
    }

    // Access a single component of this tuple.
    constexpr T& operator[](size_t i) noexcept
    {
        // Ensure that this type is a POD type if T is a POD type.
        // This check is unrelated to this operator, but the static_assert
        // must be inside some function that is likely to be called.
        static_assert(std::is_trivial<tuple<T, N>>::value == std::is_trivial<T>::value,
                      "tuple<T,N> should be a POD type iff T is a POD type.");
        CUDA_SAFE_ASSERT(i < N);
        return v[i];
    }
    constexpr const T& operator[](size_t i) const noexcept
    {
        CUDA_SAFE_ASSERT(i < N);
        return v[i];
    }

    // Get a pointer to the data of this tuple.
    constexpr T* data() noexcept
    {
        return v;
    }
    constexpr const T* data() const noexcept
    {
        return v;
    }

    // Iterator functions for compatibility with templated code that expects an iterable container.
    constexpr iterator begin() noexcept
    {
        return v;
    }
    constexpr const_iterator begin() const noexcept
    {
        return v;
    }
    constexpr const_iterator cbegin() const noexcept
    {
        return v;
    }
    constexpr iterator end() noexcept
    {
        return v + N;
    }
    constexpr const_iterator end() const noexcept
    {
        return v + N;
    }
    constexpr const_iterator cend() const noexcept
    {
        return v + N;
    }

    // Since this is a fixed-component-count vector class,
    // it should never be empty, so this should always return false.
    // The compiler probably won't allow N to be 0, but if it ever
    // does, it would return true in that unlikely case.
    static constexpr bool empty() noexcept
    {
        return N == 0;
    }

    // Add two of the vectors together
    constexpr tuple& operator+=(const tuple<T, N>& that) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] += that[i];
        }
        return *this;
    }
    // passing lhs by value helps optimize chained a+b+c
    friend tuple<T, N> operator+(tuple<T, N> lhs, const tuple<T, N>& rhs)
    {
        lhs += rhs;
        return lhs;
    }

    // Subtract another vector from this one
    constexpr tuple& operator-=(const tuple<T, N>& that) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] -= that[i];
        }
        return *this;
    }
    // passing lhs by value helps optimize chained a-b-c
    friend tuple<T, N> operator-(tuple<T, N> lhs, const tuple<T, N>& rhs)
    {
        lhs -= rhs;
        return lhs;
    }

    // Multiply all elements of the vector by a constant
    constexpr tuple& operator*=(const T& multiplier) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] *= multiplier;
        }
        return *this;
    }
    // passing lhs by value helps optimize chained (a*b)*c
    friend tuple<T, N> operator*(tuple<T, N> lhs, const T& multiplier)
    {
        lhs *= multiplier;
        return lhs;
    }

    // Divide all elements of the vector by a constant
    constexpr tuple& operator/=(const T& divisor) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] /= divisor;
        }
        return *this;
    }
    // passing lhs by value helps optimize chained (a/b)/c
    friend tuple<T, N> operator/(tuple<T, N> lhs, const T& divisor)
    {
        lhs /= divisor;
        return lhs;
    }

    // Helper function for PoD types that computes length
    // NOTE(review): the sum of squares accumulates in T before the sqrt, so
    // for narrow integer T the intermediate can overflow — confirm callers
    // only use this with floating-point component types.
    constexpr double length() const
    {
        T myLength{ (T)0 };
        for (size_t i = 0; i < N; ++i)
        {
            myLength += operator[](i) * operator[](i);
        }
        return std::sqrt(myLength);
    }

    // Helper function for PoD types that returns a normalized version of the vector
    constexpr tuple<T, N> normalized() const
    {
        tuple<T, N> normalizedVector{ *this };
        double vectorLength = length();
        // If the length is zero then the vector is zero so it can be returned directly.
        if (vectorLength != 0.0)
        {
            for (size_t i = 0; i < N; ++i)
            {
                // C-style cast is needed in order to silently handle acceptable precision losses
                normalizedVector[i] = T(normalizedVector[i] / vectorLength);
            }
        }
        return normalizedVector;
    }

    // Uses the elementwise less-than in order from first to last for non-PoD types.
    // Should only be called if the elements have that operator.
    // This is a lexicographic comparison: the first unequal component decides.
    constexpr bool lessThanByElement(const tuple<T, N>& rhs) const
    {
        for (size_t i = 0; i < N; ++i)
        {
            if (operator[](i) < rhs[i])
            {
                return true;
            }
            else if (rhs[i] < operator[](i))
            {
                return false;
            }
        }
        return false;
    }
    // Algorithm for "<" that uses the vector length for comparison. Does not use the length()
    // method because for comparison puposes the expensize sqrt() is not needed.
    constexpr bool lessThanByLength(const tuple<T, N>& rhs) const
    {
        T myLength{ (T)0 };
        T rhsLength{ (T)0 };
        for (size_t i = 0; i < N; ++i)
        {
            myLength += operator[](i) * operator[](i);
            rhsLength += rhs[i] * rhs[i];
        }
        return myLength < rhsLength;
    }
    // Ordering is by vector length for PoDs and by ordering of elements in order if not
    // NOTE(review): this makes "<" for PoD types a comparison of magnitudes, not a
    // lexicographic order, so distinct vectors of equal length compare equivalent.
    constexpr bool operator<(const tuple<T, N>& rhs) const
    {
        return is_trivial<T>().check() ? lessThanByLength(rhs) : lessThanByElement(rhs);
    }

    // Uses the elementwise less-than in order from first to last for non-PoD types.
    // Should only be called if the elements have that operator.
    constexpr bool greaterThanByElement(const tuple<T, N>& rhs) const
    {
        for (size_t i = 0; i < N; ++i)
        {
            if (operator[](i) > rhs[i])
            {
                return true;
            }
            else if (rhs[i] > operator[](i))
            {
                return false;
            }
        }
        return false;
    }
    // Algorithm for "<" that uses the vector length for comparison
    constexpr bool greaterThanByLength(const tuple<T, N>& rhs) const
    {
        T myLength{ (T)0 };
        T rhsLength{ (T)0 };
        for (size_t i = 0; i < N; ++i)
        {
            myLength += operator[](i) * operator[](i);
            rhsLength += rhs[i] * rhs[i];
        }
        // No need to take sqrt as only the relative difference is important
        return myLength > rhsLength;
    }
    // Ordering is by vector length for PoDs and by ordering of elements in order if not
    constexpr bool operator>(const tuple<T, N>& rhs) const
    {
        return is_trivial<T>().check() ? greaterThanByLength(rhs) : greaterThanByElement(rhs);
    }

    // Equality is element-wise
    constexpr bool operator==(const tuple<T, N>& rhs) const
    {
        for (size_t i = 0; i < N; ++i)
        {
            if (operator[](i) != rhs[i])
            {
                return false;
            }
        }
        return true;
    }
    // Derivative operators, where just as efficient as separate implementation
    constexpr bool operator!=(const tuple<T, N>& rhs) const
    {
        return !operator==(rhs);
    }
    constexpr bool operator>=(const tuple<T, N>& rhs) const
    {
        return !operator<(rhs);
    }
    constexpr bool operator<=(const tuple<T, N>& rhs) const
    {
        return !operator>(rhs);
    }

private:
    // Raw component storage; layout-compatible with T[N] so a T* known to
    // refer to at least N elements may be reinterpreted as a tuple<T, N>*.
    T v[N];

    // Recursive per-component initializer for the variadic constructor.
    // Terminal case: the final argument must land at index N - 1.
    template <size_t i, typename OTHER_T>
    constexpr void initHelper(OTHER_T a)
    {
        static_assert(i == N - 1, "Variadic constructor of tuple<T, N> requires N arguments");
        v[i] = T(a);
    }
    // Recursive case: assign component i and continue with the remainder.
    template <size_t i, typename OTHER_T, typename... Args>
    constexpr void initHelper(OTHER_T a, Args... args)
    {
        v[i] = T(a);
        initHelper<i + 1>(args...);
    }
};
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/Type.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/fabric/IdTypes.h>
#include <omni/fabric/Type.h>
#include <sstream>
#include <string>
// Alias fabric types into omnigraph for backwards compatibility.
// Use omni/fabric/Type.h outside of the omnigraph project
namespace omni
{
namespace graph
{
namespace core
{
//! OmniGraph Type is just an alias for the Fabric Type
using Type = omni::fabric::Type;
//! OmniGraph BaseDataType is just an alias for the Fabric BaseDataType
using BaseDataType = omni::fabric::BaseDataType;
//! OmniGraph AttributeRole is just an alias for the Fabric AttributeRole
using AttributeRole = omni::fabric::AttributeRole;
using BucketId = omni::fabric::BucketId;
// ======================================================================
/**
* @brief Get a string that describes the type role in OGN format
*
* The role name is slightly different here than @ref omni::fabric::getAttributeRoleName for historical reasons
*
* @param r Role whose name is to be returned
* @return std::string OGN-style name of the given role
*/
inline std::string getOgnRoleName(AttributeRole r)
{
    // OGN-style role names, indexed by the numeric value of the role.
    // The order must match the declaration order of AttributeRole.
    static const char* const kOgnRoleNames[] = { "none",
                                                 "vector",
                                                 "normal",
                                                 "point",
                                                 "color",
                                                 "texcoord",
                                                 "quat",
                                                 "transform",
                                                 "frame",
                                                 "timecode",
                                                 "text",
                                                 "appliedSchema",
                                                 "primTypeName",
                                                 "execution",
                                                 "matrix",
                                                 "objectId",
                                                 "bundle",
                                                 "path",
                                                 "instancedAttribute",
                                                 "ancestorPrimTypeName",
                                                 "target",
                                                 "unknown" };
    // Roles past eUnknown have no OGN name; report them as an empty string.
    if (r > AttributeRole::eUnknown)
    {
        return std::string();
    }
    return kOgnRoleNames[uint8_t(r)];
}
// ======================================================================
/**
* @brief Get a string that describes the type in OGN format
*
* OGN formats the type names slightly differently than @ref omni::fabric::getTypeName
* - the tuples are indexed "float[3]" instead of "float3"
* - the roles replace the actual name "colord[3]" instead of "double3 (color)"
*
* @param t Type definition whose name is to be returned
* @return std::string OGN-style name of the type
*/
inline std::string getOgnTypeName(Type t)
{
    std::ostringstream typeName;
    // Text attributes are presented simply as "string" in OGN. Note the early
    // return also skips the tuple/array suffix logic below.
    if (t.role == AttributeRole::eText)
    {
        typeName << "string";
        return typeName.str();
    }
    // Path attributes are likewise named by role alone.
    if (t.role == AttributeRole::ePath)
    {
        typeName << "path";
        return typeName.str();
    }
    if (t.role != AttributeRole::eNone)
    {
        // Role-based name, e.g. "color", "point", ... (see getOgnRoleName).
        typeName << getOgnRoleName(t.role);
        // For roles with explicit types, add that to the role name
        if ((t.role != AttributeRole::eTimeCode) && (t.role != AttributeRole::eTransform) &&
            (t.role != AttributeRole::eFrame) && (t.role != AttributeRole::eObjectId) &&
            (t.role != AttributeRole::eBundle) && (t.role != AttributeRole::eExecution) &&
            (t.role != AttributeRole::eTarget))
        {
            // Floating-point flavors get a single-letter suffix (e.g. "colord");
            // any other base type streams its full name after the role.
            switch (t.baseType)
            {
            case BaseDataType::eHalf:
                typeName << "h";
                break;
            case BaseDataType::eFloat:
                typeName << "f";
                break;
            case BaseDataType::eDouble:
                typeName << "d";
                break;
            default:
                typeName << t.baseType;
                break;
            }
        }
    }
    else
    {
        // No role: the name is just the base type (e.g. "float", "int").
        typeName << t.baseType;
    }
    // Tuples are indexed OGN-style: "float[3]" rather than "float3".
    // NOTE(review): the suffix prints dimension() while the guard checks
    // componentCount — presumably equivalent for tuple types; confirm for
    // matrix-like roles.
    if (t.componentCount > 1)
    {
        typeName << "[" << uint32_t(t.dimension()) << "]";
    }
    // One or two levels of array nesting append "[]" / "[][]".
    if (t.arrayDepth == 1)
        typeName << "[]";
    else if (t.arrayDepth == 2)
        typeName << "[][]";
    return typeName.str();
}
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/IVariable2.h | // Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/core/IObject.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/Handle.h>
#include <omni/graph/core/IVariable.h>
namespace omni
{
namespace graph
{
namespace core
{
OMNI_DECLARE_INTERFACE(IVariable2);
/**
 * @brief Interface extension for IVariable that adds the ability to set a variable type.
 *
 * This is a stable ABI class; the layout and the virtual method below must not change.
 */
class IVariable2_abi :
    public omni::core::Inherits<omni::graph::core::IVariable, OMNI_TYPE_ID("omni.graph.core.IVariable2")>
{
protected:
    /**
     * Sets the type of the variable.
     *
     * @param[in] type New type for the variable
     *
     * @return True if the type is able to be set, false otherwise
     */
    virtual bool setType_abi(Type type) noexcept = 0;
};
} // namespace core
} // namespace graph
} // namespace omni
#include "IVariable2.gen.h"
/**
* Implementation of IVariable setType method
*/
#ifndef DOXYGEN_BUILD
inline bool omni::graph::core::IVariable::setType(omni::graph::core::Type type) noexcept
{
    // Setting the type is only available through the IVariable2 extension
    // interface; report failure when the concrete object does not provide it.
    if (auto v2 = omni::core::cast<IVariable2>(this))
    {
        return v2->setType(type);
    }
    return false;
}
#endif
|
omniverse-code/kit/include/omni/graph/core/IInternal.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Defines.h>
#include <carb/Interface.h>
#include <carb/Types.h>
#include <omni/graph/core/unstable/IPrivateNodeGraphDef.h>
#include <omni/graph/core/unstable/IPrivateNodeDef.h>
#include <omni/graph/core/Handle.h>
#include <cstddef>
namespace omni
{
namespace graph
{
namespace core
{
/**
 * This class contains functions and member variables that are intended only for internal
 * use by the omni.graph.core extension but which, for technical reasons, we are forced to
 * expose externally.
 *
 * This class is explicitly exempt from the usual rules about deprecation and backwards compatibility.
 * Members may be added, removed, or changed at any time without notice.
 */
struct IInternal
{
    CARB_PLUGIN_INTERFACE("omni::graph::core::IInternal", 1, 2);
    // NOTE: In the Python implementation all methods and members should begin with a single underscore (_)
    // to further drive home that they are meant for internal use only.
    /**
     * Mark an attribute as deprecated, meaning that it should no longer be used and will be removed in a future version.
     *
     * @param[in] attrObj The attribute being deprecated.
     * @param[in] message Message explaining what users should do to deal with the deprecation.
     */
    void(CARB_ABI* deprecateAttribute)(const AttributeObj& attrObj, const char *message);
    /**
     * Factory method used to create internal generic graph definition wrapping private class.
     *
     * (The previous documentation also listed @p builder and @p definitionName
     * parameters which are not part of this signature.)
     *
     * @param[in] graphObj Authoring graph this definition belongs to
     * @param[in] isInstanced Is this graph a graph instance
     */
    unstable::IPrivateNodeGraphDef* (CARB_ABI* createPrivateGraphDef)(
        const GraphObj& graphObj,
        bool isInstanced);
    /**
     * Factory method used to create internal generic node definition wrapping private class
     *
     * @param[in] nodeObj Authoring node this definition belongs to
     */
    unstable::IPrivateNodeDef*(CARB_ABI* createPrivateNodeDef)(const NodeObj& nodeObj);
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IInternal, createPrivateNodeDef, 2)
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/ArrayWrapper.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "GpuArray.h"
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/Accessors.h>
#include <tuple>
namespace omni {
namespace graph {
namespace core {
template <typename T>
struct GpuArray;
template <typename T>
struct ConstGpuArray;
// Convenience function to retrieve an attribute data handle given the attribute. The
// attribute data handle is needed to retrieve the data of the attribute. This is the read only version.
inline ConstAttributeDataHandle getAttributeHandleR(const AttributeObj& attrObj, InstanceIndex instIndex)
{
    // Delegate to the attribute interface for the read-only data handle.
    auto const* iAttr = attrObj.iAttribute;
    return iAttr->getConstAttributeDataHandle(attrObj, instIndex);
}
// Convenience function to retrieve an attribute data handle given the attribute. The
// attribute data handle is needed to retrieve the data of the attribute. This is the writable version.
inline AttributeDataHandle getAttributeHandleW(const AttributeObj& attrObj, InstanceIndex instIndex)
{
    // Delegate to the attribute interface for the writable data handle.
    auto const* iAttr = attrObj.iAttribute;
    return iAttr->getAttributeDataHandle(attrObj, instIndex);
}
//! Wraps a single array-valued attribute of element type T, providing CPU and
//! GPU access to its data through the attribute-data ABI.
template <typename T>
class ArrayWrapper
{
    const GraphContextObj m_context = {};
    const AttributeObj m_attrObj = {};
    const InstanceIndex m_instIdx{ kAccordingToContextIndex };

public:
    //! Construct a wrapper over the given attribute in the given context.
    ArrayWrapper(const GraphContextObj& context,
                 const AttributeObj& attrObj,
                 InstanceIndex instanceIdx = kAccordingToContextIndex)
        : m_context(context), m_attrObj(attrObj), m_instIdx(instanceIdx)
    {
    }

    //! Default-constructed wrappers are invalid (isValid() returns false).
    ArrayWrapper()
    {
    }

    //! Returns whether the wrapper is usable for the requested access mode.
    bool isValid(DataAccessFlags access) const
    {
        if (!m_context.iContext)
            return false;
        // for outputs it's okay for the size to be 0. So we only check for inputs here.
        return access == kReadOnly ? getArrayRd() != nullptr : true;
    }

    // CPU buffer accessors

    //! Writable CPU pointer to the first element (alias of getArrayWr()).
    T* getArray()
    {
        return getArrayWr();
    }

    //! Read-only CPU pointer to the first element, or nullptr when unavailable.
    const T* getArrayRd() const
    {
        ConstAttributeDataHandle attrDataHandle = getAttributeHandleR(m_attrObj, m_instIdx);
        // Technically this is a void*** at this point. Fabric stores arrays as a pointer to the actual
        // buffer. The getDataR interface requires a void** , so we are forcing it to be that here.
        // At the end of the call, out contains the address to the buffer, which we then dereference to
        // get at the array buffer
        const void** out = nullptr;
        void** outPtr = reinterpret_cast<void**>(&out);
        m_context.iAttributeData->getDataR((const void**)outPtr, m_context, &attrDataHandle, 1);
        if (out == nullptr)
            return nullptr;
        return reinterpret_cast<T*>(const_cast<void*>(*out));
    }

    //! Writable CPU pointer to the first element, or nullptr when unavailable.
    T* getArrayWr()
    {
        AttributeDataHandle attrDataHandle = getAttributeHandleW(m_attrObj, m_instIdx);
        // Initialize to nullptr so the failure check below is well-defined even
        // if the ABI call does not write the output (previously this local was
        // left uninitialized, unlike the read path above).
        void** out = nullptr;
        // see note above about void*** forced to void**
        void** outPtr = reinterpret_cast<void**>(&out);
        m_context.iAttributeData->getDataW(outPtr, m_context, &attrDataHandle, 1);
        if (out == nullptr)
            return nullptr;
        return reinterpret_cast<T*>(*out);
    }

    //! Resize the array to hold newCount elements.
    void resize(size_t newCount)
    {
        AttributeDataHandle attrDataHandle = getAttributeHandleW(m_attrObj, m_instIdx);
        m_context.iAttributeData->setElementCount(m_context, attrDataHandle, newCount);
    }

    //! Number of elements currently in the array.
    size_t size() const
    {
        ConstAttributeDataHandle attrDataHandle = getAttributeHandleR(m_attrObj, m_instIdx);
        size_t elemCount = 0;
        m_context.iAttributeData->getElementCount(&elemCount, m_context, &attrDataHandle, 1);
        return elemCount;
    }

    //! True when the array holds no elements.
    bool empty() const
    {
        return size() == 0;
    }

    // GPU buffer accessors

    //! Writable GPU view (alias of getArrayWrGPU()).
    GpuArray<T> getArrayGPU()
    {
        return getArrayWrGPU();
    }

    //! Read-only GPU view of the array. Unlike the CPU path, the view keeps
    //! the pointer-to-pointer; it is dereferenced on the device side.
    ConstGpuArray<T> getArrayRdGPU() const
    {
        ConstAttributeDataHandle attrDataHandle = getAttributeHandleR(m_attrObj, m_instIdx);
        const void** out = nullptr;
        void** outPtr = reinterpret_cast<void**>(&out);
        m_context.iAttributeData->getDataRGPU((const void**)outPtr, m_context, &attrDataHandle, 1);
        auto array = (T* const*)(out);
        return ConstGpuArray<T>{ array, sizeGPU() };
    }

    //! Writable GPU view of the array.
    GpuArray<T> getArrayWrGPU()
    {
        AttributeDataHandle attrDataHandle = getAttributeHandleW(m_attrObj, m_instIdx);
        // Initialized for the same reason as in getArrayWr().
        void** out = nullptr;
        void** outPtr = reinterpret_cast<void**>(&out);
        m_context.iAttributeData->getDataWGPU(outPtr, m_context, &attrDataHandle, 1);
        auto array = (T* const*)(out);
        return GpuArray<T>{ array, sizeGPU() };
    }

    //! GPU-side element count pointer. Always nullptr: GPU-side resizing of
    //! arrays is not supported, so no device-side size buffer exists.
    const size_t* sizeGPU() const
    {
        return nullptr;
    }
};
}
}
}
|
omniverse-code/kit/include/omni/graph/core/BundleAttribImpl.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BundleAttrib.h"
#include "ConstBundlePrims.h"
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/CppWrappers.h>
#include <omni/math/linalg/vec.h>
#include <omni/math/linalg/matrix.h>
#include <omni/math/linalg/quat.h>
#include <omni/math/linalg/half.h>
namespace omni
{
namespace math
{
namespace linalg
{
// Compile-time mapping from a C++ value type to the matching fabric
// BaseDataType. The unspecialized template maps to eUnknown; vector,
// quaternion and matrix wrappers forward to the mapping of their scalar
// component type.
template <typename T>
struct TypeToBaseType
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUnknown;
};
template <>
struct TypeToBaseType<half>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eHalf;
};
template <>
struct TypeToBaseType<float>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eFloat;
};
template <>
struct TypeToBaseType<double>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eDouble;
};
template <>
struct TypeToBaseType<bool>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eBool;
};
template <>
struct TypeToBaseType<unsigned char>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUChar;
};
template <>
struct TypeToBaseType<int>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eInt;
};
template <>
struct TypeToBaseType<int64_t>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eInt64;
};
template <>
struct TypeToBaseType<unsigned int>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUInt;
};
template <>
struct TypeToBaseType<uint64_t>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUInt64;
};
template <>
struct TypeToBaseType<omni::fabric::Token>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eToken;
};
// Aggregate wrappers: the base type is that of the scalar component.
template <typename T, size_t N>
struct TypeToBaseType<base_vec<T, N>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec2<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec3<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec4<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<quat<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T, size_t N>
struct TypeToBaseType<base_matrix<T, N>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix2<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix3<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix4<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
// Compile-time mapping from a C++ value type to its component count:
// 1 for scalars, N for N-component vectors/quaternions, N*N for square
// matrices.
template <typename T>
struct TypeToComponentCount
{
    constexpr static size_t count = 1;
};
template <typename T, size_t N>
struct TypeToComponentCount<base_vec<T,N>>
{
    constexpr static size_t count = N;
};
template <typename T>
struct TypeToComponentCount<vec2<T>>
{
    constexpr static size_t count = 2;
};
template <typename T>
struct TypeToComponentCount<vec3<T>>
{
    constexpr static size_t count = 3;
};
template <typename T>
struct TypeToComponentCount<vec4<T>>
{
    constexpr static size_t count = 4;
};
template <typename T>
struct TypeToComponentCount<quat<T>>
{
    constexpr static size_t count = 4;
};
template <typename T, size_t N>
struct TypeToComponentCount<base_matrix<T,N>>
{
    constexpr static size_t count = N*N;
};
template <typename T>
struct TypeToComponentCount<matrix2<T>>
{
    constexpr static size_t count = 4;
};
template <typename T>
struct TypeToComponentCount<matrix3<T>>
{
    constexpr static size_t count = 9;
};
template <typename T>
struct TypeToComponentCount<matrix4<T>>
{
    constexpr static size_t count = 16;
};
} // namespace linalg
} // namespace math
} // namespace omni
namespace omni
{
namespace graph
{
namespace core
{
namespace detail
{
//
// Non-owning string buffer with compile time size evaluation
//
//
// Non-owning view over a NUL-terminated (or explicitly sized) character
// buffer, with compile-time length evaluation. Does not own the characters.
//
class StringBuffer
{
public:
    using value_type = char const*;
    using size_type = std::size_t;
    using const_iterator = char const*;

    //! View over an explicitly sized buffer.
    constexpr StringBuffer(value_type data, size_type size) noexcept : m_chars{ data }, m_length{ size }
    {
    }

    //! View over a NUL-terminated string; the length is computed at compile
    //! time when possible.
    constexpr explicit StringBuffer(value_type data) noexcept : StringBuffer{ data, measure(data) }
    {
    }

    constexpr StringBuffer(StringBuffer const&) = default;
    constexpr StringBuffer(StringBuffer&&) = default;

    //! Pointer to the first character of the viewed buffer.
    constexpr value_type data() const noexcept
    {
        return m_chars;
    }

    //! Number of characters in the view (excluding any terminator).
    constexpr size_type size() const noexcept
    {
        return m_length;
    }

    constexpr const_iterator begin() const noexcept
    {
        return m_chars;
    }

    constexpr const_iterator end() const noexcept
    {
        return m_chars + m_length;
    }

private:
    // Constexpr strlen-equivalent: counts characters up to the NUL terminator.
    static constexpr size_type measure(value_type start) noexcept
    {
        size_type count = 0;
        while (start[count] != '\0')
            ++count;
        return count;
    }

    value_type m_chars;
    size_type m_length;
};
// Helper class to keep name and type together.
struct AttrDefinition
{
    AttrDefinition(StringBuffer _name, omni::graph::core::Type _type, omni::graph::core::NameToken _token) noexcept
        : name{ _name }
        , type{ _type }
        , token{ _token }
    {
    }
    // Convenience constructor: derives the token by interning _text through
    // the fabric token interface.
    AttrDefinition(omni::fabric::IToken const* iToken, char const* _text, omni::graph::core::Type _type) noexcept
        : AttrDefinition{ StringBuffer{_text}, _type, iToken->getHandle(_text) }
    {
    }
    // Definitions are handed out as shared function-local statics (see the
    // accessors below), so copying/moving is forbidden.
    AttrDefinition(AttrDefinition const&) = delete;
    AttrDefinition(AttrDefinition&&) = delete;
    AttrDefinition& operator=(AttrDefinition const&) = delete;
    AttrDefinition& operator=(AttrDefinition&&) = delete;
    StringBuffer name; // Name and size of the attribute
    omni::graph::core::Type type; // Type of the attribute
    omni::graph::core::NameToken token; // Token representation of the name
};
// Attribute Level Definitions:
// Metadata attribute holding an attribute's interpolation (token).
inline AttrDefinition const& getAttrInterpolationDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "interpolation", Type{ BaseDataType::eToken, 1, 0 } };
    return d;
}
// Metadata attribute holding an attribute's source flag (uchar).
inline AttrDefinition const& getAttrSourceDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "source", Type{ BaseDataType::eUChar, 1, 0 } };
    return d;
}
// Primitive Level Definitions:
inline AttrDefinition const& getPrimIndexDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primIndex", Type{ BaseDataType::eUInt64, 1, 0 } };
    return d;
}
inline AttrDefinition const& getPrimPathDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "sourcePrimPath", Type{ BaseDataType::eToken, 1, 0 } };
    return d;
}
inline AttrDefinition const& getPrimTypeDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "sourcePrimType", Type{ BaseDataType::eToken, 1, 0 } };
    return d;
}
// Bundle Level Definitions:
inline AttrDefinition const& getBundlePrimIndexOffsetDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "bundlePrimIndexOffset", Type{ BaseDataType::eUInt64, 1, 0 } };
    return d;
}
// Constant types.
// Relationship data is stored as an array of tokens.
constexpr omni::graph::core::Type s_relationshipType{ omni::graph::core::BaseDataType::eToken, 1, 1 };
} // namespace detail
inline bool BundleAttrib::isRelationshipData() const noexcept
{
    // Relationship data is identified by both its source flag and its type
    // (an array of tokens), so evaluate the two conditions by name.
    bool const fromRelationship = (m_source == Source::Relationship);
    bool const hasRelationshipType = (type() == detail::s_relationshipType);
    return fromRelationship && hasRelationshipType;
}
// Sets (or clears) the interpolation metadata for this attribute.
// Returns false only when the owning bundle is no longer reachable.
inline bool BundleAttrib::setInterpolation(omni::graph::core::NameToken interpolation) noexcept
{
    using namespace omni::graph::core;
    // Already at the requested value: nothing to do.
    if (m_interpolation == interpolation)
        return true;
    // An uninitialized token means "remove the interpolation metadata".
    if (interpolation == omni::fabric::kUninitializedToken)
    {
        clearInterpolation();
        return true;
    }
    if (IBundle2* bundle = getBundlePtr())
    {
        auto& interpDef = detail::getAttrInterpolationDefinition();
        // Create the metadata attribute lazily on first use.
        AttributeDataHandle interpolationAttr = bundle->getAttributeMetadataByName(m_name, interpDef.token);
        if (!interpolationAttr.isValid())
        {
            interpolationAttr = bundle->createAttributeMetadata(m_name, interpDef.token, interpDef.type);
        }
        m_interpolation = interpolation;
        auto context = bundle->getContext();
        // Persist the new token value in the metadata storage.
        *getDataW<NameToken>(context, interpolationAttr) = interpolation;
        return true;
    }
    return false;
}
// Sets the source flag (attribute vs relationship data), mirroring the value
// into persistent metadata. Returns false when the owning bundle is gone.
inline bool BundleAttrib::setSource(Source source) noexcept
{
    using namespace omni::graph::core;
    if(m_source == source)
        return true;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto& sourceDef = detail::getAttrSourceDefinition();
        // Create the metadata attribute lazily on first use.
        AttributeDataHandle sourceAttr = bundle->getAttributeMetadataByName(m_name, sourceDef.token);
        if(!sourceAttr.isValid())
        {
            sourceAttr = bundle->createAttributeMetadata(m_name, sourceDef.token, sourceDef.type);
        }
        m_source = source;
        auto context = bundle->getContext();
        *omni::graph::core::getDataW<SourceType>(context, sourceAttr) = static_cast<SourceType>(source);
        return true;
    }
    return false;
}
// Copies the value, cached type, interpolation and source flag from
// sourceAttr into this attribute. Silently does nothing when this
// attribute's bundle is no longer reachable.
inline void BundleAttrib::copyContentsFrom(BundleAttrib const& sourceAttr) noexcept
{
    using namespace omni::graph::core;
    IBundle2* dstBundle = getBundlePtr();
    IConstBundle2* srcBundle = sourceAttr.getConstBundlePtr();
    if (!dstBundle)
    {
        return;
    }
    // NOTE(review): srcBundle is not null-checked before use below —
    // presumably a live BundleAttrib always has a reachable const bundle;
    // confirm.
    auto context = dstBundle->getContext();
    // Copy Attribute
    AttributeDataHandle dstAttrHandle = dstBundle->getAttributeByName(m_name);
    ConstAttributeDataHandle srcAttrHandle = srcBundle->getConstAttributeByName(sourceAttr.m_name);
    // Ensure that copyData updated the type correctly, if needed.
    CARB_ASSERT(context.iAttributeData->getType(context, dstAttrHandle) == Type(m_type));
    context.iAttributeData->copyData(dstAttrHandle, context, srcAttrHandle);
    // Copy the cached type
    m_type = sourceAttr.m_type;
    // Copy the interpolation (does nothing if the same; clears interpolation if none on sourceAttr)
    setInterpolation(sourceAttr.interpolation());
    // Copy source
    setSource(sourceAttr.m_source);
}
// Removes the interpolation metadata attribute and resets the cached token.
inline void BundleAttrib::clearInterpolation() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext(); // NOTE(review): appears unused here
        auto& interpDef = detail::getAttrInterpolationDefinition();
        bundle->removeAttributeMetadata(m_name, interpDef.token);
        m_interpolation = omni::fabric::kUninitializedToken;
    }
}
// Back-pointer to the primitive that owns this attribute.
inline ConstBundlePrim* BundleAttrib::getBundlePrim() const noexcept
{
    return m_bundlePrim;
}
// Removes the source metadata attribute and resets the cached flag to the
// default (Attribute).
inline void BundleAttrib::clearSource() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext(); // NOTE(review): appears unused here
        auto& sourceDef = detail::getAttrSourceDefinition();
        bundle->removeAttributeMetadata(m_name, sourceDef.token);
        m_source = BundleAttribSource::Attribute;
    }
}
// Token of the attribute's name.
inline omni::graph::core::NameToken BundleAttrib::name() const noexcept
{
    return m_name;
}
// Cached interpolation token (kUninitializedToken when none is set).
inline omni::graph::core::NameToken BundleAttrib::interpolation() const noexcept
{
    return m_interpolation;
}
// Cached attribute type.
inline omni::graph::core::Type BundleAttrib::type() const noexcept
{
    return omni::graph::core::Type(m_type);
}
// True when the attribute stores an array value (array depth of exactly 1;
// deeper nesting is not expected here).
inline bool BundleAttrib::isArray() const noexcept
{
    omni::graph::core::Type type{ m_type };
    CARB_ASSERT(type.arrayDepth < 2);
    return (type.arrayDepth != 0);
}
// Cached source flag (attribute vs relationship data).
inline BundleAttrib::Source BundleAttrib::source() const noexcept
{
    return m_source;
}
// True when the value originates from a plain attribute (not a relationship).
inline bool BundleAttrib::isAttributeData() const noexcept
{
    return m_source == Source::Attribute;
}
// Currently identical to name().
inline omni::graph::core::NameToken BundleAttrib::prefixedName() const noexcept
{
    return m_name;
}
// Element count: 1 for scalar attributes, otherwise the stored array length.
inline size_t BundleAttrib::size() const noexcept
{
    using namespace omni::graph::core;
    if (!isArray())
    {
        return 1;
    }
    IConstBundle2* bundle = getConstBundlePtr();
    auto context = bundle->getContext();
    ConstAttributeDataHandle attr = bundle->getConstAttributeByName(m_name);
    size_t count;
    context.iAttributeData->getElementCount(&count, context, &attr, 1);
    return count;
}
// Resizes an array attribute to the requested element count. Must only be
// called on array attributes (asserted); no-op when the bundle is gone.
inline void BundleAttrib::resize(size_t arrayElementCount) noexcept
{
    using namespace omni::graph::core;
    CARB_ASSERT(isArray());
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext();
        AttributeDataHandle attr = bundle->getAttributeByName(m_name);
        context.iAttributeData->setElementCount(context, attr, arrayElementCount);
    }
}
// Writable pointer to the raw value. For scalar attributes the data pointer
// itself is returned; for arrays the stored pointer is dereferenced once to
// reach the element buffer. Returns nullptr when the bundle is gone.
inline void* BundleAttrib::getDataInternal() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext();
        AttributeDataHandle attr = bundle->getAttributeByName(m_name);
        if (Type(m_type).arrayDepth == 0)
        {
            return getDataW<void>(context, attr);
        }
        return *getDataW<void*>(context, attr);
    }
    return nullptr;
}
// Read-only variant of getDataInternal(). Note: unlike the writable variant,
// the const bundle pointer is not null-checked before use.
inline void const* BundleAttrib::getDataInternal() const noexcept
{
    using namespace omni::graph::core;
    IConstBundle2* constBundle = getConstBundlePtr();
    GraphContextObj context = constBundle->getContext();
    ConstAttributeDataHandle attr = constBundle->getConstAttributeByName(m_name);
    if (Type(m_type).arrayDepth == 0)
    {
        return getDataR<void const>(context, attr);
    }
    return *getDataR<void const*>(context, attr);
}
// Writable fabric handle for this attribute, or an invalid handle when the
// bundle is gone.
inline omni::graph::core::AttributeDataHandle BundleAttrib::handle() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        return AttributeDataHandle(AttrKey(bundle->getHandle(), m_name.token));
    }
    return AttributeDataHandle{ AttributeDataHandle::invalidValue() };
}
// Read-only fabric handle for this attribute, or an invalid handle when the
// bundle is gone.
inline omni::graph::core::ConstAttributeDataHandle BundleAttrib::handle() const noexcept
{
    using namespace omni::graph::core;
    if(IConstBundle2* bundle = getConstBundlePtr())
    {
        return ConstAttributeDataHandle{ AttrKey(bundle->getConstHandle(), m_name.token) };
    }
    return ConstAttributeDataHandle{ ConstAttributeDataHandle::invalidValue() };
}
// Type-checked writable pointer to the attribute data, or nullptr when T is
// not a valid view of the stored type.
template <typename T>
T* BundleAttrib::getData() noexcept
{
    // It must be valid to request a pointer to type T.
    // requesting a float* or vec3f* for a vec3f type is valid, but double* or vec2f* is not.
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    bool const isSameBaseType = TypeToBaseType<T>::baseType == Type(m_type).baseType;
    bool const isSameCount = TypeToComponentCount<T>::count == Type(m_type).componentCount;
    // Scalars of the matching base type may alias any component count.
    bool const isValidCast = isSameBaseType && (TypeToComponentCount<T>::count == 1 || isSameCount);
    return isValidCast ? reinterpret_cast<T*>(getDataInternal()) : nullptr;
}
// Read-only counterpart of getData() with the same casting rules.
template <typename T>
T const* BundleAttrib::getData() const noexcept
{
    // It must be valid to request a pointer to type T.
    // requesting a float* or vec3f* for a vec3f type is valid, but double* or vec2f* is not.
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    bool const isValidCast =
        TypeToBaseType<T>::baseType == Type(m_type).baseType &&
        (TypeToComponentCount<T>::count == 1 ||
         TypeToComponentCount<T>::count == Type(m_type).componentCount);
    return isValidCast ? reinterpret_cast<T const*>(getDataInternal()) : nullptr;
}
// Explicitly-const alias of the const getData() overload.
template <typename T>
T const* BundleAttrib::getConstData() const noexcept
{
    return getData<T>();
}
// Returns the scalar (non-array) value by copy. T must match the stored
// component count exactly (asserted).
template <typename T>
T BundleAttrib::get() const noexcept
{
    using namespace omni::math::linalg;
    // TODO: Figure out how to support array attributes here.
    CARB_ASSERT(omni::graph::core::Type(m_type).arrayDepth == 0);
    // This has stronger requirements than getData, since get<float>() isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == omni::graph::core::Type(m_type).componentCount);
    return *getConstData<T>();
}
// Assigns a scalar (non-array) value. T must match the stored component
// count exactly (asserted).
template <typename T>
void BundleAttrib::set(T const& value) noexcept
{
    using namespace omni::math::linalg;
    CARB_ASSERT(omni::graph::core::Type(m_type).arrayDepth == 0);
    // This has stronger requirements than getData, since set(1.0f) isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == omni::graph::core::Type(m_type).componentCount);
    *getData<T>() = value;
}
// Resizes the array attribute to elementCount and copies the given values in.
template <typename T>
void BundleAttrib::set(T const* values, size_t elementCount) noexcept
{
    using namespace omni::math::linalg;
    CARB_ASSERT(omni::graph::core::Type(m_type).arrayDepth == 1);
    // This has stronger requirements than getData, since set(float const*,size_t) isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == omni::graph::core::Type(m_type).componentCount);
    resize(elementCount);
    if (elementCount > 0)
    {
        T* p = getData<T>();
        for (size_t i = 0; i < elementCount; ++i)
        {
            p[i] = values[i];
        }
    }
}
// Removes the attribute from its owning bundle and invalidates all cached
// state on this wrapper.
inline void BundleAttrib::clearContents() noexcept
{
    using namespace omni::graph::core;
    /**
     * Remove attribute. Its metadata will be removed automatically together with it.
     * Guard against a missing bundle for consistency with the other mutators
     * (setSource, clearSource, resize, ...), which all null-check
     * getBundlePtr() before using it.
     */
    if (IBundle2* bundle = getBundlePtr())
    {
        bundle->removeAttributeByName(m_name);
    }
    /**
     * Invalidate cached data regardless of whether the bundle was reachable.
     */
    m_source = BundleAttribSource::Attribute;
    m_interpolation = omni::fabric::kUninitializedToken;
    m_type = omni::fabric::kUnknownType;
    m_name = omni::fabric::kUninitializedToken;
    m_bundlePrim = nullptr;
}
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/NodeTypeRegistryTemplates.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#ifndef NODE_TYPE_REGISTRY
# error This file can only be included indirectly from NodeTypeRegistrar.h
#endif
#include "iComputeGraph.h"
#include <type_traits>
//======================================================================
// Template metaprogramming that allows the node registration to figure out at compile time
// which functions a node has that can be registered. Functions come from the INodeType
// interface in iComputeGraph.h.
// Helper traits for the detection idiom below: regardless of the probed type,
// sfinae_true yields a true trait and sfinae_false a false trait. The type
// parameter exists only so substitution failure can discard an overload.
template <typename>
struct sfinae_true : std::integral_constant<bool, true>
{
};
template <typename>
struct sfinae_false : std::integral_constant<bool, false>
{
};
// Template collection to provide a pointer to the static method getNodeType() if it exists
// Usage: auto getNodeTypeFn = getNodeTypeFunction<NodeClass>();
using GetNodeTypeFunction = std::add_pointer<const char*()>::type;
template <typename NodeType>
constexpr auto _checkGetNodeTypeFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkGetNodeTypeFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().getNodeType())>;
template <typename NodeType>
struct _hasGetNodeTypeFunction : decltype(_checkGetNodeTypeFunction<NodeType>(0))
{
};
template <typename NodeType, typename std::enable_if<_hasGetNodeTypeFunction<NodeType>::value, bool>::type = 0>
static inline GetNodeTypeFunction getNodeTypeFunction()
{
return &NodeType::getNodeType;
};
template <typename NodeType, typename std::enable_if<!_hasGetNodeTypeFunction<NodeType>::value, bool>::type = 0>
static inline GetNodeTypeFunction getNodeTypeFunction()
{
return nullptr;
};
// Template collection to provide a pointer to the static method compute() if it exists
// Usage: auto computeFn = computeFunction<NodeClass>();
using ComputeFunction = std::add_pointer<bool(const GraphContextObj&, const NodeObj&)>::type;
template <typename NodeType>
constexpr auto _checkComputeFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkComputeFunction(int)
-> sfinae_true<decltype(std::declval<const NodeType&>().compute(std::declval<const GraphContextObj&>(),
std::declval<const NodeObj&>()))>;
template <typename NodeType>
struct _hasComputeFunction : decltype(_checkComputeFunction<NodeType>(0))
{
};
template <typename NodeType, typename std::enable_if<_hasComputeFunction<NodeType>::value, bool>::type = 0>
static inline ComputeFunction computeFunction()
{
return &NodeType::compute;
};
template <typename NodeType, typename std::enable_if<!_hasComputeFunction<NodeType>::value, bool>::type = 0>
static inline ComputeFunction computeFunction()
{
return nullptr;
};
// Template collection to provide a pointer to the static method initialize() if it exists
// Usage: auto initializeFn = initializeFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching initialize().
using InitializeFunction = std::add_pointer<void(const GraphContextObj&, const NodeObj&)>::type;
template <typename NodeType>
constexpr auto _checkInitializeFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkInitializeFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().initialize(std::declval<const GraphContextObj&>(),
                                                                       std::declval<const NodeObj&>()))>;
// ::value is true iff NodeType provides a usable initialize().
template <typename NodeType>
struct _hasInitializeFunction : decltype(_checkInitializeFunction<NodeType>(0))
{
};
// Selected when NodeType defines initialize(): expose its address.
template <typename NodeType, typename std::enable_if<_hasInitializeFunction<NodeType>::value, bool>::type = 0>
static inline InitializeFunction initializeFunction()
{
    return &NodeType::initialize;
};
// Selected otherwise: report the absence of initialize() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasInitializeFunction<NodeType>::value, bool>::type = 0>
static inline InitializeFunction initializeFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method release() if it exists
// Usage: auto releaseFn = releaseFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching release().
using ReleaseFunction = std::add_pointer<void(const NodeObj&)>::type;
template <typename NodeType>
constexpr auto _checkReleaseFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkReleaseFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().release(std::declval<const NodeObj&>()))>;
// ::value is true iff NodeType provides a usable release().
template <typename NodeType>
struct _hasReleaseFunction : decltype(_checkReleaseFunction<NodeType>(0))
{
};
// Selected when NodeType defines release(): expose its address.
template <typename NodeType, typename std::enable_if<_hasReleaseFunction<NodeType>::value, bool>::type = 0>
static inline ReleaseFunction releaseFunction()
{
    return &NodeType::release;
};
// Selected otherwise: report the absence of release() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasReleaseFunction<NodeType>::value, bool>::type = 0>
static inline ReleaseFunction releaseFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method initializeType() if it exists
// Usage: auto initializeTypeFn = initializeTypeFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching initializeType().
using InitializeTypeFunction = std::add_pointer<void(const NodeTypeObj&)>::type;
template <typename NodeType>
constexpr auto _checkInitializeTypeFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkInitializeTypeFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().initializeType(std::declval<const NodeTypeObj&>()))>;
// ::value is true iff NodeType provides a usable initializeType().
template <typename NodeType>
struct _hasInitializeTypeFunction : decltype(_checkInitializeTypeFunction<NodeType>(0))
{
};
// Selected when NodeType defines initializeType(): expose its address.
template <typename NodeType, typename std::enable_if<_hasInitializeTypeFunction<NodeType>::value, bool>::type = 0>
static inline InitializeTypeFunction initializeTypeFunction()
{
    return &NodeType::initializeType;
};
// Selected otherwise: report the absence of initializeType() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasInitializeTypeFunction<NodeType>::value, bool>::type = 0>
static inline InitializeTypeFunction initializeTypeFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method updateNodeVersion() if it exists
// Usage: auto updateNodeVersionFn = updateNodeVersionFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching updateNodeVersion().
using UpdateNodeVersionFunction = std::add_pointer<bool(const GraphContextObj&, const NodeObj&, int, int)>::type;
template <typename NodeType>
constexpr auto _checkUpdateNodeVersionFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkUpdateNodeVersionFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().updateNodeVersion(
    std::declval<const GraphContextObj&>(), std::declval<const NodeObj&>(), std::declval<int>(), std::declval<int>()))>;
// ::value is true iff NodeType provides a usable updateNodeVersion().
template <typename NodeType>
struct _hasUpdateNodeVersionFunction : decltype(_checkUpdateNodeVersionFunction<NodeType>(0))
{
};
// Selected when NodeType defines updateNodeVersion(): expose its address.
template <typename NodeType, typename std::enable_if<_hasUpdateNodeVersionFunction<NodeType>::value, bool>::type = 0>
static inline UpdateNodeVersionFunction updateNodeVersionFunction()
{
    return &NodeType::updateNodeVersion;
};
// Selected otherwise: report the absence of updateNodeVersion() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasUpdateNodeVersionFunction<NodeType>::value, bool>::type = 0>
static inline UpdateNodeVersionFunction updateNodeVersionFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method addInput() if it exists
// Usage: auto addInputFn = addInputFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching addInput().
using AddInputFunction =
    std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)>::type;
template <typename NodeType>
constexpr auto _checkAddInputFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkAddInputFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().addInput(std::declval<const NodeTypeObj&>(),
                                                                     std::declval<const char*>(),
                                                                     std::declval<const char*>(),
                                                                     std::declval<bool>(),
                                                                     std::declval<const void*>(),
                                                                     std::declval<const size_t*>()))>;
// ::value is true iff NodeType provides a usable addInput().
template <typename NodeType>
struct _hasAddInputFunction : decltype(_checkAddInputFunction<NodeType>(0))
{
};
// Selected when NodeType defines addInput(): expose its address.
template <typename NodeType, typename std::enable_if<_hasAddInputFunction<NodeType>::value, bool>::type = 0>
static inline AddInputFunction addInputFunction()
{
    return &NodeType::addInput;
};
// Selected otherwise: report the absence of addInput() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasAddInputFunction<NodeType>::value, bool>::type = 0>
static inline AddInputFunction addInputFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method addExtendedInput() if it exists
// Usage: auto addExtendedInputFn = addExtendedInputFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching addExtendedInput().
using AddExtendedInputFunction =
    std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)>::type;
template <typename NodeType>
constexpr auto _checkAddExtendedInputFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkAddExtendedInputFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().addExtendedInput(std::declval<const NodeTypeObj&>(),
                                                                             std::declval<const char*>(),
                                                                             std::declval<const char*>(),
                                                                             std::declval<bool>(),
                                                                             std::declval<ExtendedAttributeType>()))>;
// ::value is true iff NodeType provides a usable addExtendedInput().
template <typename NodeType>
struct _hasAddExtendedInputFunction : decltype(_checkAddExtendedInputFunction<NodeType>(0))
{
};
// Selected when NodeType defines addExtendedInput(): expose its address.
template <typename NodeType, typename std::enable_if<_hasAddExtendedInputFunction<NodeType>::value, bool>::type = 0>
static inline AddExtendedInputFunction addExtendedInputFunction()
{
    return &NodeType::addExtendedInput;
};
// Selected otherwise: report the absence of addExtendedInput() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasAddExtendedInputFunction<NodeType>::value, bool>::type = 0>
static inline AddExtendedInputFunction addExtendedInputFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method addOutput() if it exists
// Usage: auto addOutputFn = addOutputFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching addOutput().
using AddOutputFunction =
    std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)>::type;
template <typename NodeType>
constexpr auto _checkAddOutputFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkAddOutputFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().addOutput(std::declval<const NodeTypeObj&>(),
                                                                      std::declval<const char*>(),
                                                                      std::declval<const char*>(),
                                                                      std::declval<bool>(),
                                                                      std::declval<const void*>(),
                                                                      std::declval<const size_t*>()))>;
// ::value is true iff NodeType provides a usable addOutput().
template <typename NodeType>
struct _hasAddOutputFunction : decltype(_checkAddOutputFunction<NodeType>(0))
{
};
// Selected when NodeType defines addOutput(): expose its address.
template <typename NodeType, typename std::enable_if<_hasAddOutputFunction<NodeType>::value, bool>::type = 0>
static inline AddOutputFunction addOutputFunction()
{
    return &NodeType::addOutput;
};
// Selected otherwise: report the absence of addOutput() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasAddOutputFunction<NodeType>::value, bool>::type = 0>
static inline AddOutputFunction addOutputFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method addExtendedOutput() if it exists
// Usage: auto addExtendedOutputFn = addExtendedOutputFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching addExtendedOutput().
using AddExtendedOutputFunction =
    std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)>::type;
template <typename NodeType>
constexpr auto _checkAddExtendedOutputFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkAddExtendedOutputFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().addExtendedOutput(std::declval<const NodeTypeObj&>(),
                                                                              std::declval<const char*>(),
                                                                              std::declval<const char*>(),
                                                                              std::declval<bool>(),
                                                                              std::declval<ExtendedAttributeType>()))>;
// ::value is true iff NodeType provides a usable addExtendedOutput().
template <typename NodeType>
struct _hasAddExtendedOutputFunction : decltype(_checkAddExtendedOutputFunction<NodeType>(0))
{
};
// Selected when NodeType defines addExtendedOutput(): expose its address.
template <typename NodeType, typename std::enable_if<_hasAddExtendedOutputFunction<NodeType>::value, bool>::type = 0>
static inline AddExtendedOutputFunction addExtendedOutputFunction()
{
    return &NodeType::addExtendedOutput;
};
// Selected otherwise: report the absence of addExtendedOutput() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasAddExtendedOutputFunction<NodeType>::value, bool>::type = 0>
static inline AddExtendedOutputFunction addExtendedOutputFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method addState() if it exists
// Usage: auto addStateFn = addStateFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching addState().
using AddStateFunction =
    std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)>::type;
template <typename NodeType>
constexpr auto _checkAddStateFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkAddStateFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().addState(std::declval<const NodeTypeObj&>(),
                                                                     std::declval<const char*>(),
                                                                     std::declval<const char*>(),
                                                                     std::declval<bool>(),
                                                                     std::declval<const void*>(),
                                                                     std::declval<const size_t*>()))>;
// ::value is true iff NodeType provides a usable addState().
template <typename NodeType>
struct _hasAddStateFunction : decltype(_checkAddStateFunction<NodeType>(0))
{
};
// Selected when NodeType defines addState(): expose its address.
template <typename NodeType, typename std::enable_if<_hasAddStateFunction<NodeType>::value, bool>::type = 0>
static inline AddStateFunction addStateFunction()
{
    return &NodeType::addState;
};
// Selected otherwise: report the absence of addState() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasAddStateFunction<NodeType>::value, bool>::type = 0>
static inline AddStateFunction addStateFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method addExtendedState() if it exists
// Usage: auto addExtendedStateFn = addExtendedStateFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching addExtendedState().
using AddExtendedStateFunction =
    std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)>::type;
template <typename NodeType>
constexpr auto _checkAddExtendedStateFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkAddExtendedStateFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().addExtendedState(std::declval<const NodeTypeObj&>(),
                                                                             std::declval<const char*>(),
                                                                             std::declval<const char*>(),
                                                                             std::declval<bool>(),
                                                                             std::declval<ExtendedAttributeType>()))>;
// ::value is true iff NodeType provides a usable addExtendedState().
template <typename NodeType>
struct _hasAddExtendedStateFunction : decltype(_checkAddExtendedStateFunction<NodeType>(0))
{
};
// Selected when NodeType defines addExtendedState(): expose its address.
template <typename NodeType, typename std::enable_if<_hasAddExtendedStateFunction<NodeType>::value, bool>::type = 0>
static inline AddExtendedStateFunction addExtendedStateFunction()
{
    return &NodeType::addExtendedState;
};
// Selected otherwise: report the absence of addExtendedState() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasAddExtendedStateFunction<NodeType>::value, bool>::type = 0>
static inline AddExtendedStateFunction addExtendedStateFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method hasState() if it exists
// Usage: auto hasStateFn = hasStateFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching hasState().
using HasStateFunction = std::add_pointer<bool(const NodeTypeObj&)>::type;
template <typename NodeType>
constexpr auto _checkHasStateFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkHasStateFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().hasState(std::declval<const NodeTypeObj&>()))>;
// ::value is true iff NodeType provides a usable hasState().
template <typename NodeType>
struct _hasHasStateFunction : decltype(_checkHasStateFunction<NodeType>(0))
{
};
// Selected when NodeType defines hasState(): expose its address.
template <typename NodeType, typename std::enable_if<_hasHasStateFunction<NodeType>::value, bool>::type = 0>
static inline HasStateFunction hasStateFunction()
{
    return &NodeType::hasState;
};
// Selected otherwise: report the absence of hasState() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasHasStateFunction<NodeType>::value, bool>::type = 0>
static inline HasStateFunction hasStateFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method registerTasks() if it exists
// Usage: auto registerTasksFn = registerTasksFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching registerTasks().
using RegisterTasksFunction = std::add_pointer<void()>::type;
template <typename NodeType>
constexpr auto _checkRegisterTasksFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkRegisterTasksFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().registerTasks())>;
// ::value is true iff NodeType provides a usable registerTasks().
template <typename NodeType>
struct _hasRegisterTasksFunction : decltype(_checkRegisterTasksFunction<NodeType>(0))
{
};
// Selected when NodeType defines registerTasks(): expose its address.
template <typename NodeType, typename std::enable_if<_hasRegisterTasksFunction<NodeType>::value, bool>::type = 0>
static inline RegisterTasksFunction registerTasksFunction()
{
    return &NodeType::registerTasks;
};
// Selected otherwise: report the absence of registerTasks() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasRegisterTasksFunction<NodeType>::value, bool>::type = 0>
static inline RegisterTasksFunction registerTasksFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method getAllMetadata() if it exists
// Usage: auto getAllMetadataFn = getAllMetadataFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching getAllMetadata().
using GetAllMetadataFunction =
    std::add_pointer<size_t(const NodeTypeObj& nodeType, const char**, const char**, size_t)>::type;
template <typename NodeType>
constexpr auto _checkGetAllMetadataFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkGetAllMetadataFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().getAllMetadata(
    std::declval<const NodeTypeObj&>(), std::declval<const char**>(), std::declval<const char**>(), std::declval<size_t>()))>;
// ::value is true iff NodeType provides a usable getAllMetadata().
template <typename NodeType>
struct _hasGetAllMetadataFunction : decltype(_checkGetAllMetadataFunction<NodeType>(0))
{
};
// Selected when NodeType defines getAllMetadata(): expose its address.
template <typename NodeType, typename std::enable_if<_hasGetAllMetadataFunction<NodeType>::value, bool>::type = 0>
static inline GetAllMetadataFunction getAllMetadataFunction()
{
    return &NodeType::getAllMetadata;
};
// Selected otherwise: report the absence of getAllMetadata() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasGetAllMetadataFunction<NodeType>::value, bool>::type = 0>
static inline GetAllMetadataFunction getAllMetadataFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method getMetadata() if it exists
// Usage: auto getMetadataFn = getMetadataFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching getMetadata().
using GetMetadataFunction = std::add_pointer<const char*(const NodeTypeObj& nodeType, const char*)>::type;
template <typename NodeType>
constexpr auto _checkGetMetadataFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkGetMetadataFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().getMetadata(std::declval<const NodeTypeObj&>(),
                                                                        std::declval<const char*>()))>;
// ::value is true iff NodeType provides a usable getMetadata().
template <typename NodeType>
struct _hasGetMetadataFunction : decltype(_checkGetMetadataFunction<NodeType>(0))
{
};
// Selected when NodeType defines getMetadata(): expose its address.
template <typename NodeType, typename std::enable_if<_hasGetMetadataFunction<NodeType>::value, bool>::type = 0>
static inline GetMetadataFunction getMetadataFunction()
{
    return &NodeType::getMetadata;
};
// Selected otherwise: report the absence of getMetadata() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasGetMetadataFunction<NodeType>::value, bool>::type = 0>
static inline GetMetadataFunction getMetadataFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method getMetadataCount() if it exists
// Usage: auto getMetadataCountFn = getMetadataCountFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching getMetadataCount().
using GetMetadataCountFunction = std::add_pointer<size_t(const NodeTypeObj& nodeType)>::type;
template <typename NodeType>
constexpr auto _checkGetMetadataCountFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkGetMetadataCountFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().getMetadataCount(std::declval<const NodeTypeObj&>()))>;
// ::value is true iff NodeType provides a usable getMetadataCount().
template <typename NodeType>
struct _hasGetMetadataCountFunction : decltype(_checkGetMetadataCountFunction<NodeType>(0))
{
};
// Selected when NodeType defines getMetadataCount(): expose its address.
template <typename NodeType, typename std::enable_if<_hasGetMetadataCountFunction<NodeType>::value, bool>::type = 0>
static inline GetMetadataCountFunction getMetadataCountFunction()
{
    return &NodeType::getMetadataCount;
};
// Selected otherwise: report the absence of getMetadataCount() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasGetMetadataCountFunction<NodeType>::value, bool>::type = 0>
static inline GetMetadataCountFunction getMetadataCountFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method setMetadata() if it exists
// Usage: auto setMetadataFn = setMetadataFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching setMetadata().
using SetMetadataFunction = std::add_pointer<void(const NodeTypeObj& nodeType, const char*, const char*)>::type;
template <typename NodeType>
constexpr auto _checkSetMetadataFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkSetMetadataFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().setMetadata(
    std::declval<const NodeTypeObj&>(), std::declval<const char*>(), std::declval<const char*>()))>;
// ::value is true iff NodeType provides a usable setMetadata().
template <typename NodeType>
struct _hasSetMetadataFunction : decltype(_checkSetMetadataFunction<NodeType>(0))
{
};
// Selected when NodeType defines setMetadata(): expose its address.
template <typename NodeType, typename std::enable_if<_hasSetMetadataFunction<NodeType>::value, bool>::type = 0>
static inline SetMetadataFunction setMetadataFunction()
{
    return &NodeType::setMetadata;
};
// Selected otherwise: report the absence of setMetadata() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasSetMetadataFunction<NodeType>::value, bool>::type = 0>
static inline SetMetadataFunction setMetadataFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method getScheduleNodeCount() if it exists
// Usage: auto getScheduleNodeCountFn = getScheduleNodeCountFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching getScheduleNodeCount().
using GetScheduleNodeCountFunction =
    std::add_pointer<size_t(const GraphContextObj&, const NodeObj&, const ScheduleNodeObj*, size_t)>::type;
template <typename NodeType>
constexpr auto _checkGetScheduleNodeCountFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkGetScheduleNodeCountFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().getScheduleNodeCount(std::declval<const GraphContextObj&>(),
                                                                                 std::declval<const NodeObj&>(),
                                                                                 std::declval<const ScheduleNodeObj*>(),
                                                                                 std::declval<size_t>()))>;
// ::value is true iff NodeType provides a usable getScheduleNodeCount().
template <typename NodeType>
struct _hasGetScheduleNodeCountFunction : decltype(_checkGetScheduleNodeCountFunction<NodeType>(0))
{
};
// Selected when NodeType defines getScheduleNodeCount(): expose its address.
template <typename NodeType, typename std::enable_if<_hasGetScheduleNodeCountFunction<NodeType>::value, bool>::type = 0>
static inline GetScheduleNodeCountFunction getScheduleNodeCountFunction()
{
    return &NodeType::getScheduleNodeCount;
};
// Selected otherwise: report the absence of getScheduleNodeCount() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasGetScheduleNodeCountFunction<NodeType>::value, bool>::type = 0>
static inline GetScheduleNodeCountFunction getScheduleNodeCountFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method getScheduleNodes() if it exists
// Usage: auto getScheduleNodesFn = getScheduleNodesFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching getScheduleNodes().
using GetScheduleNodesFunction = std::add_pointer<void(
    const GraphContextObj&, const NodeObj&, const ScheduleNodeObj*, size_t, ScheduleNodeObj*, size_t)>::type;
template <typename NodeType>
constexpr auto _checkGetScheduleNodesFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkGetScheduleNodesFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().getScheduleNodes(std::declval<const GraphContextObj&>(),
                                                                             std::declval<const NodeObj&>(),
                                                                             std::declval<const ScheduleNodeObj*>(),
                                                                             std::declval<size_t>(),
                                                                             std::declval<ScheduleNodeObj*>(),
                                                                             std::declval<size_t>()))>;
// ::value is true iff NodeType provides a usable getScheduleNodes().
template <typename NodeType>
struct _hasGetScheduleNodesFunction : decltype(_checkGetScheduleNodesFunction<NodeType>(0))
{
};
// Selected when NodeType defines getScheduleNodes(): expose its address.
template <typename NodeType, typename std::enable_if<_hasGetScheduleNodesFunction<NodeType>::value, bool>::type = 0>
static inline GetScheduleNodesFunction getScheduleNodesFunction()
{
    return &NodeType::getScheduleNodes;
};
// Selected otherwise: report the absence of getScheduleNodes() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasGetScheduleNodesFunction<NodeType>::value, bool>::type = 0>
static inline GetScheduleNodesFunction getScheduleNodesFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method onConnectionTypeResolve() if it exists
// (header comment previously said "onConnectionMade()" — a copy/paste error; the detected method is
// onConnectionTypeResolve()).
// Usage: auto onConnectionTypeResolveFn = onConnectionTypeResolveFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching onConnectionTypeResolve().
using OnConnectionTypeResolveFunction = std::add_pointer<void(const NodeObj&)>::type;
template <typename NodeType>
constexpr auto _checkOnConnectionTypeResolveFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkOnConnectionTypeResolveFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().onConnectionTypeResolve(
        std::declval<const NodeObj&>()))>;
// ::value is true iff NodeType provides a usable onConnectionTypeResolve().
template <typename NodeType>
struct _hasOnConnectionTypeResolveFunction : decltype(_checkOnConnectionTypeResolveFunction<NodeType>(0))
{
};
// Selected when NodeType defines onConnectionTypeResolve(): expose its address.
template <typename NodeType, typename std::enable_if<_hasOnConnectionTypeResolveFunction<NodeType>::value, bool>::type = 0>
static inline OnConnectionTypeResolveFunction onConnectionTypeResolveFunction()
{
    return &NodeType::onConnectionTypeResolve;
};
// Selected otherwise: report the absence of onConnectionTypeResolve() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasOnConnectionTypeResolveFunction<NodeType>::value, bool>::type = 0>
static inline OnConnectionTypeResolveFunction onConnectionTypeResolveFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method inspect() if it exists
// Usage: auto inspectFn = inspectFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching inspect().
using InspectFunction = std::add_pointer<bool(const NodeTypeObj& nodeType, inspect::IInspector*)>::type;
template <typename NodeType>
constexpr auto _checkInspectFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkInspectFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().inspect(std::declval<const NodeTypeObj&>(),
                                                                    std::declval<inspect::IInspector*>()))>;
// ::value is true iff NodeType provides a usable inspect().
template <typename NodeType>
struct _hasInspectFunction : decltype(_checkInspectFunction<NodeType>(0))
{
};
// Selected when NodeType defines inspect(): expose its address.
template <typename NodeType, typename std::enable_if<_hasInspectFunction<NodeType>::value, bool>::type = 0>
static inline InspectFunction inspectFunction()
{
    return &NodeType::inspect;
};
// Selected otherwise: report the absence of inspect() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasInspectFunction<NodeType>::value, bool>::type = 0>
static inline InspectFunction inspectFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method computeVectorized() if it exists
// Usage: auto computeVectorizedFn = computeVectorizedFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching computeVectorized().
using ComputeVectorizedFunction = std::add_pointer<size_t(const GraphContextObj&, const NodeObj&, size_t)>::type;
template <typename NodeType>
constexpr auto _checkComputeVectorizedFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkComputeVectorizedFunction(int)
    -> sfinae_true<decltype(std::declval<const NodeType&>().computeVectorized(std::declval<const GraphContextObj&>(),
                                                                              std::declval<const NodeObj&>(),
                                                                              std::declval<size_t>()))>;
// ::value is true iff NodeType provides a usable computeVectorized().
template <typename NodeType>
struct _hasComputeVectorizedFunction : decltype(_checkComputeVectorizedFunction<NodeType>(0))
{
};
// Selected when NodeType defines computeVectorized(): expose its address.
template <typename NodeType, typename std::enable_if<_hasComputeVectorizedFunction<NodeType>::value, bool>::type = 0>
static inline ComputeVectorizedFunction computeVectorizedFunction()
{
    return &NodeType::computeVectorized;
};
// Selected otherwise: report the absence of computeVectorized() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasComputeVectorizedFunction<NodeType>::value, bool>::type = 0>
static inline ComputeVectorizedFunction computeVectorizedFunction()
{
    return nullptr;
};
// Template collection to provide a pointer to the static method releaseInstance() if it exists
// Usage: auto releaseInstanceFn = releaseInstanceFunction<NodeClass>();
// The returned pointer is nullptr when NodeClass does not implement a matching releaseInstance().
using ReleaseInstanceFunction = std::add_pointer<void(const NodeObj&, NameToken)>::type;
template <typename NodeType>
constexpr auto _checkReleaseInstanceFunction(long) -> sfinae_false<NodeType>;
template <typename NodeType>
constexpr auto _checkReleaseInstanceFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().releaseInstance(
    std::declval<const NodeObj&>(), std::declval<NameToken>()))>;
// ::value is true iff NodeType provides a usable releaseInstance().
template <typename NodeType>
struct _hasReleaseInstanceFunction : decltype(_checkReleaseInstanceFunction<NodeType>(0))
{
};
// Selected when NodeType defines releaseInstance(): expose its address.
template <typename NodeType, typename std::enable_if<_hasReleaseInstanceFunction<NodeType>::value, bool>::type = 0>
static inline ReleaseInstanceFunction releaseInstanceFunction()
{
    return &NodeType::releaseInstance;
};
// Selected otherwise: report the absence of releaseInstance() with nullptr.
template <typename NodeType, typename std::enable_if<!_hasReleaseInstanceFunction<NodeType>::value, bool>::type = 0>
static inline ReleaseInstanceFunction releaseInstanceFunction()
{
    return nullptr;
};
|
omniverse-code/kit/include/omni/graph/core/ISchedulingHints2.gen.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Interface extension for ISchedulingHints that adds a new "pure" hint
//! omni.bind-generated forwarding wrapper: each method below forwards to the corresponding
//! *_abi() virtual on ISchedulingHints2_abi (see the inline definitions later in this file).
template <>
class omni::core::Generated<omni::graph::core::ISchedulingHints2_abi> : public omni::graph::core::ISchedulingHints2_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::ISchedulingHints2")
    /**
     * Get the flag describing the node's purity state.
     *
     * @returns Value of the PurityStatus flag.
     */
    omni::graph::core::ePurityStatus getPurityStatus() noexcept;
    /**
     * Set the flag describing the node's purity status.
     *
     * @param[in] newPurityStatus New value of the PurityStatus flag.
     */
    void setPurityStatus(omni::graph::core::ePurityStatus newPurityStatus) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
//! Forwards to the ABI entry point; no additional logic.
inline omni::graph::core::ePurityStatus omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>::getPurityStatus() noexcept
{
    return getPurityStatus_abi();
}
//! Forwards to the ABI entry point; no additional logic.
inline void omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>::setPurityStatus(
    omni::graph::core::ePurityStatus newPurityStatus) noexcept
{
    setPurityStatus_abi(newPurityStatus);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/core/IDirtyID.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "dirtyid/IDirtyID1.h"
#include "dirtyid/IDirtyID2.h"
|
omniverse-code/kit/include/omni/graph/core/INodeCategories.gen.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
/** Interface to the list of categories that a node type can belong to */
/** omni.bind-generated forwarding wrapper: each method below forwards to the corresponding
 *  *_abi() virtual on INodeCategories_abi (see the inline definitions later in this file). */
template <>
class omni::core::Generated<omni::graph::core::INodeCategories_abi> : public omni::graph::core::INodeCategories_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::INodeCategories")
    /**
     * Get the number of categories available
     *
     * @returns Count of fixed category types
     */
    size_t getCategoryCount() noexcept;
    /**
     * Get the list of available categories and their descriptions.
     *
     * The caller is responsible for allocating and destroying buffers large enough to hold "bufferSize" results.
     * If bufferSize > getCategoryCount() then the entries at the ends of the buffers will be filled with nullptr.
     *
     * @param[in] categoryNameBuffer List of category names
     * @param[in] categoryDescriptionBuffer List of category descriptions corresponding to the names
     * @param[in] bufferSize Number of entries to fill in the buffers
     *
     * @return true if the category buffer was successfully filled and the bufferSize matched the category count
     */
    bool getCategories(const char** categoryNameBuffer, const char** categoryDescriptionBuffer, size_t bufferSize) noexcept;
    /**
     * Define a new category
     *
     * @param[in] categoryName Name of the new category
     * @param[in] categoryDescription Description of the category
     *
     * @return false if there was already a category with the given name
     */
    bool defineCategory(const char* categoryName, const char* categoryDescription) noexcept;
    /**
     * Remove an existing category, mainly to manage the ones created by a node type for itself
     *
     * @param[in] categoryName Name of the category to remove
     *
     * @return false if there was no category with the given name
     */
    bool removeCategory(const char* categoryName) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
//! Forwards to the ABI entry point; no additional logic.
inline size_t omni::core::Generated<omni::graph::core::INodeCategories_abi>::getCategoryCount() noexcept
{
    return getCategoryCount_abi();
}
//! Forwards to the ABI entry point; buffer ownership stays with the caller (see class doc).
inline bool omni::core::Generated<omni::graph::core::INodeCategories_abi>::getCategories(
    const char** categoryNameBuffer, const char** categoryDescriptionBuffer, size_t bufferSize) noexcept
{
    return getCategories_abi(categoryNameBuffer, categoryDescriptionBuffer, bufferSize);
}
//! Forwards to the ABI entry point; no additional logic.
inline bool omni::core::Generated<omni::graph::core::INodeCategories_abi>::defineCategory(
    const char* categoryName, const char* categoryDescription) noexcept
{
    return defineCategory_abi(categoryName, categoryDescription);
}
//! Forwards to the ABI entry point; no additional logic.
inline bool omni::core::Generated<omni::graph::core::INodeCategories_abi>::removeCategory(const char* categoryName) noexcept
{
    return removeCategory_abi(categoryName);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
|
omniverse-code/kit/include/omni/graph/core/BundlePrimsImpl.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BundlePrims.h"
namespace omni
{
namespace graph
{
namespace core
{
// ====================================================================================================
//
// Bundle Attribute
//
// Because entire Bundle Prims is inlined, we have to put definition of those functions
// after declaration of ConstBundlePrim and ConstBundlePrims.
// ====================================================================================================
//! Looks up an existing attribute named \p name on \p prim and caches its
//! state (owning prim, name, type, optional interpolation and source) on
//! this wrapper.
//!
//! If no attribute with that name exists, all members are left in their
//! default state; in particular m_bundlePrim stays null, which callers use
//! to detect "attribute not found".
inline BundleAttrib::BundleAttrib(ConstBundlePrim& prim, omni::graph::core::NameToken name) noexcept
{
    using namespace omni::graph::core;
    // Get attribute handle and attribute properties
    IConstBundle2* bundle = prim.getConstBundlePtr();
    ConstAttributeDataHandle attributeHandle = bundle->getConstAttributeByName(name);
    if(!attributeHandle.isValid())
    {
        // Attribute does not exist: leave the wrapper in its invalid state.
        return;
    }
    GraphContextObj const& context = prim.getConstBundlePrims()->context();
    m_bundlePrim = &prim;
    m_name = name;
    m_type = omni::fabric::TypeC(context.iAttributeData->getType(context, attributeHandle));
    // Read attribute properties.
    // Interpolation metadata is optional and only cached when present.
    ConstAttributeDataHandle propertyAttributeHandle;
    propertyAttributeHandle =
        bundle->getConstAttributeMetadataByName(name, detail::getAttrInterpolationDefinition().token);
    if(propertyAttributeHandle.isValid())
    {
        m_interpolation = *getDataR<NameToken>(context, propertyAttributeHandle);
    }
    // Source metadata distinguishes data attributes from relationships.
    propertyAttributeHandle =
        bundle->getConstAttributeMetadataByName(name, detail::getAttrSourceDefinition().token);
    if(propertyAttributeHandle.isValid())
    {
        m_source = static_cast<Source>(*getDataR<SourceType>(context, propertyAttributeHandle));
    }
}
//! Finds or creates an attribute named \p name on the writable \p prim.
//!
//! Delegates to the lookup constructor first; if the attribute already
//! exists (m_bundlePrim was filled in), the cached state is reused and
//! nothing new is created.
inline BundleAttrib::BundleAttrib(BundlePrim& prim, omni::graph::core::NameToken name, omni::graph::core::Type type, size_t arrayElementCount, Source source) noexcept
    : BundleAttrib{ prim, name }
{
    using namespace omni::graph::core;
    // Attribute exists!
    if (m_bundlePrim)
    {
        return;
    }
    // Attribute does not exist.
    IBundle2* bundle = prim.getBundlePtr();
    GraphContextObj const& context = prim.getConstBundlePrims()->context();
    auto handle = bundle->createAttribute(name, type, arrayElementCount);
    omni::graph::core::getDataW<void*>(context, handle); // remove after OM-50059 is merged.
    m_bundlePrim = &prim;
    m_name = name;
    m_type = omni::fabric::TypeC(type);
    // Interpolation is optional.
    // Source of the attribute identifies "data" or "relationship"
    setSource(source);
}
//! Returns the owning prim as a writable BundlePrim, or nullptr when the
//! underlying bundle does not implement the mutable IBundle2 interface.
inline BundlePrim* BundleAttrib::getBundlePrim() noexcept
{
    // Writable access is only legal when the bundle can be cast to IBundle2.
    auto writableBundle = omni::cast<IBundle2>(getConstBundlePtr());
    return writableBundle ? static_cast<BundlePrim*>(m_bundlePrim) : nullptr;
}
//! Returns the read-only bundle interface of the prim owning this attribute.
//! NOTE(review): relies on the const overload of getBundlePrim() declared in
//! BundlePrims.h; assumes the attribute is valid (owning prim is non-null).
inline omni::graph::core::IConstBundle2* BundleAttrib::getConstBundlePtr() const noexcept
{
    ConstBundlePrim* bundlePrim = getBundlePrim();
    return bundlePrim->getConstBundlePtr();
}
//! Returns the writable bundle interface of the owning prim.
inline omni::graph::core::IBundle2* BundleAttrib::getBundlePtr() noexcept
{
    BundlePrim* bundlePrim = getBundlePrim();
    return bundlePrim->getBundlePtr();
}
//! Queries the dirty ID tracked for this attribute via the compute graph's
//! dirty-ID interface; used elsewhere to detect content changes.
inline DirtyIDType BundleAttrib::dirtyID() const noexcept
{
    auto const context = getConstBundlePtr()->getContext();
    auto id = carb::getCachedInterface<omni::graph::core::ComputeGraph>()->getDirtyIDInterfacePtr(context);
    return id->getForAttribute(this->handle());
}
// ====================================================================================================
//
// Bundle Primitive
//
// ====================================================================================================
//! Constructs a writable prim wrapper over \p bundle, owned by \p bundlePrims.
//! All state lives in the ConstBundlePrim base; this class adds no members.
inline BundlePrim::BundlePrim(BundlePrims& bundlePrims, omni::core::ObjectPtr<IBundle2> bundle)
    : ConstBundlePrim{ bundlePrims, std::move(bundle) }
{
}
//! Sets the prim's path metadata attribute, creating the attribute on first use.
inline void BundlePrim::setPath(NameToken path) noexcept
{
    const detail::AttrDefinition& def = detail::getPrimPathDefinition();
    IBundle2* bundle = getBundlePtr();
    AttributeDataHandle attr = bundle->getAttributeByName(def.token);
    if (!attr.isValid())
    {
        // First write: the path attribute does not exist yet.
        attr = bundle->createAttribute(def.token, def.type);
    }
    *getDataW<NameToken>(getBundlePrims()->context(), attr) = path;
}
//! Sets the prim's type metadata attribute, creating the attribute on first use.
inline void BundlePrim::setType(NameToken type) noexcept
{
    const detail::AttrDefinition& def = detail::getPrimTypeDefinition();
    IBundle2* bundle = getBundlePtr();
    AttributeDataHandle attr = bundle->getAttributeByName(def.token);
    if (!attr.isValid())
    {
        // First write: the type attribute does not exist yet.
        attr = bundle->createAttribute(def.token, def.type);
    }
    *getDataW<NameToken>(getBundlePrims()->context(), attr) = type;
}
//! Creates (or re-creates) an attribute named \p attrName with the given
//! type, element count and source, returning the new wrapper.
//!
//! Any pre-existing attribute with the same name is cleared and dropped
//! before the replacement is constructed.
inline BundleAttrib* BundlePrim::addAttr(omni::graph::core::NameToken attrName,
                                         omni::graph::core::Type type,
                                         size_t arrayElementCount,
                                         BundleAttribSource source) noexcept
{
    using namespace omni::graph::core;
    auto& attrMap = getAttributes();
    // Drop any previous attribute of the same name before re-creating it.
    auto found = attrMap.find(attrName);
    if (found != attrMap.end())
    {
        found->second->clearContents();
        attrMap.erase(found);
    }
    BundleAttrib* newAttr = new BundleAttrib{ *this, attrName, type, arrayElementCount, source };
    attrMap.emplace(attrName, newAttr);
    return newAttr;
}
//! Creates a relationship attribute: an ordinary attribute whose type and
//! source flag mark it as a set of targets.
inline BundleAttrib* BundlePrim::addRelationship(omni::graph::core::NameToken name, size_t targetCount) noexcept
{
    return addAttr(name, detail::s_relationshipType, targetCount, BundleAttribSource::Relationship);
}
//! Batch-creates the attributes described by \p attrList.
//!
//! Attributes that already exist with identical type, element count and
//! source are left untouched; attributes that exist with differing
//! properties are cleared, erased and re-created. Always returns true.
inline bool BundlePrim::addAttrs(std::vector<BundlePrim::AddAttrInfo> const& attrList) noexcept
{
    using namespace omni::graph::core;
    IBundle2* bundle = getBundlePtr();
    auto& attrs = getAttributes();
    // Remove attributes that exists but properties are different.
    std::vector<BundlePrim::AddAttrInfo> attrToCreate;
    attrToCreate.reserve(attrList.size());
    for (auto const& newAttr : attrList) {
        auto it = attrs.find(newAttr.attrName);
        if (it == attrs.end())
        {
            // Not present at all: schedule for creation.
            attrToCreate.push_back(newAttr);
            continue;
        }
        BundleAttrib const* attr = it->second.get();
        if (attr->type() != newAttr.type ||
            attr->size() != newAttr.arrayElementCount ||
            attr->source() != newAttr.source)
        {
            // Present but incompatible: destroy and schedule a re-create.
            it->second->clearContents();
            attrs.erase(it);
            attrToCreate.push_back(newAttr);
        }
        // attribute is the same nothing to do.
    }
    // Create attributes that require instantiation.
    for (auto const& tmp : attrToCreate)
    {
        auto attr = new BundleAttrib{ *this, tmp.attrName, tmp.type, tmp.arrayElementCount, tmp.source };
        attrs.emplace(tmp.attrName, attr);
    }
    return true;
}
//! Removes the attribute named \p attrName, if present, clearing its
//! underlying storage first. Silently does nothing when absent.
inline void BundlePrim::removeAttr(omni::graph::core::NameToken attrName) noexcept
{
    using namespace omni::graph::core;
    auto& attrMap = getAttributes();
    auto found = attrMap.find(attrName);
    if (found == attrMap.end())
    {
        return; // Nothing to remove.
    }
    found->second->clearContents();
    attrMap.erase(found);
}
//! Clears the underlying storage of every attribute on this prim, then
//! forgets all of them.
inline void BundlePrim::clearContents() noexcept
{
    auto& attrMap = getAttributes();
    for (auto& entry : attrMap)
    {
        entry.second->clearContents();
    }
    attrMap.clear();
}
//! Const-source overload; defers to the non-const overload.
//! NOTE(review): presumably the non-const path is needed because reading may
//! lazily build caches on the source container — confirm before tightening.
inline void BundlePrim::copyContentsFrom(ConstBundlePrim const& source, bool removeAttrsNotInSource /* = true*/) noexcept
{
    return copyContentsFrom(const_cast<ConstBundlePrim&>(source), removeAttrsNotInSource);
}
//! Copies attributes and data from \p source into this prim.
//!
//! Attributes whose dirty ID already matches the source are skipped.
//! Missing attributes are batch-created first, then data is copied.
//! When \p removeAttrsNotInSource is true, attributes present here but
//! absent from \p source are removed afterwards.
//!
//! Fix: removed two dead locals from the original (`bundlePrims` and
//! `attrsToRemove`) — both were declared but never read or written.
inline void BundlePrim::copyContentsFrom(ConstBundlePrim& source, bool removeAttrsNotInSource /* = true*/) noexcept
{
    CARB_IGNOREWARNING_MSC_WITH_PUSH(4996)
    CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wdeprecated-declarations")
    // Nothing to do if they're already equal.
    if (dirtyID() == source.dirtyID())
        return;
    // Add/set any attributes from source, if the dirty IDs are different, being sure to copy the dirty IDs.
    // first we batch add them, then we copy the contents
    std::vector<BundlePrim::AddAttrInfo> attrsToAdd;
    attrsToAdd.reserve(source.attrCount());
    for (auto const& sourceAttr : source)
    {
        NameToken name = sourceAttr.name();
        // NOTE: Request a const attribute, to avoid bumping its dirty ID.
        BundleAttrib const* constDestAttr = getConstAttr(name);
        if (constDestAttr != nullptr && constDestAttr->dirtyID() == sourceAttr.dirtyID())
        {
            continue;
        }
        if (constDestAttr == nullptr)
        {
            // Element count 0 here: the per-attribute copyContentsFrom below
            // presumably resizes arrays to match the source — TODO confirm.
            attrsToAdd.push_back(
                { sourceAttr.m_name, Type(sourceAttr.m_type), 0, sourceAttr.m_source });
        }
    }
    // add the attributes
    addAttrs(attrsToAdd);
    // copy the data
    for (auto const& sourceAttr : source)
    {
        NameToken name = sourceAttr.name();
        // NOTE: Request a const attribute, to avoid bumping its dirty ID.
        BundleAttrib const* constDestAttr = getConstAttr(name);
        CARB_ASSERT(constDestAttr != nullptr);
        if (constDestAttr == nullptr || constDestAttr->dirtyID() == sourceAttr.dirtyID())
        {
            continue;
        }
        const_cast<BundleAttrib*>(constDestAttr)->copyContentsFrom(sourceAttr);
    }
    CARB_ASSERT(attrCount() >= source.attrCount());
    // If there are more attributes in this than in source, remove any that aren't in source.
    auto& attrMap = getAttributes();
    if (attrCount() > source.attrCount() && removeAttrsNotInSource)
    {
        for (auto it = attrMap.begin(); it != attrMap.end();)
        {
            if (source.getConstAttr(it->second->name()) == nullptr)
            {
                it->second->clearContents();
                it = attrMap.erase(it);
            }
            else
            {
                ++it;
            }
        }
    }
    CARB_IGNOREWARNING_GNUC_POP
    CARB_IGNOREWARNING_MSC_POP
}
//! Looks up the writable attribute named \p attrName; nullptr when absent.
inline BundleAttrib* BundlePrim::getAttr(omni::graph::core::NameToken attrName) noexcept
{
    auto& attrMap = getAttributes();
    auto found = attrMap.find(attrName);
    return found != attrMap.end() ? found->second.get() : nullptr;
}
//! Returns the handle of the prim's underlying writable bundle.
inline omni::graph::core::BundleHandle BundlePrim::handle() noexcept
{
    return getBundlePtr()->getHandle();
}
//! Returns the owning container as a writable BundlePrims, or nullptr when
//! the underlying bundle is not writable.
inline BundlePrims* BundlePrim::getBundlePrims() noexcept
{
    if (getBundlePtr() == nullptr)
    {
        return nullptr;
    }
    return static_cast<BundlePrims*>(ConstBundlePrim::getConstBundlePrims());
}
//! Alias for getBundlePrims().
inline BundlePrims* BundlePrim::bundlePrims() noexcept
{
    return getBundlePrims();
}
//! Iterator over this prim's writable attributes (begin).
inline BundlePrimAttrIterator BundlePrim::begin() noexcept
{
    return BundlePrimAttrIterator{ *this, getAttributes().begin() };
}
//! Iterator over this prim's writable attributes (end).
inline BundlePrimAttrIterator BundlePrim::end() noexcept
{
    return BundlePrimAttrIterator{ *this, getAttributes().end() };
}
//! Const iteration is provided by the ConstBundlePrim base.
inline ConstBundlePrimAttrIterator BundlePrim::cbegin() noexcept
{
    return ConstBundlePrim::begin();
}
inline ConstBundlePrimAttrIterator BundlePrim::cend() noexcept
{
    return ConstBundlePrim::end();
}
//! Casts a read-only bundle interface to its writable counterpart;
//! nullptr when the bundle is not writable.
//! NOTE(review): the reference added by omni::cast is dropped on return, so
//! pointer validity relies on a reference held elsewhere — same as original.
inline omni::graph::core::IBundle2* BundlePrim::getBundlePtr(omni::graph::core::IConstBundle2* constBundle) noexcept
{
    return omni::cast<omni::graph::core::IBundle2>(constBundle).get();
}
//! Returns this prim's bundle as a writable interface, or nullptr.
inline omni::graph::core::IBundle2* BundlePrim::getBundlePtr() noexcept
{
    return getBundlePtr(getConstBundlePtr());
}
// ====================================================================================================
//
// Bundle Primitives
//
// ====================================================================================================
//! Destructor releases the attachment to the underlying bundle.
inline BundlePrims::~BundlePrims() noexcept
{
    detach();
}
//! Returns the attached bundle's handle, or an invalid handle when not
//! attached (or the bundle is not writable).
inline omni::graph::core::BundleHandle BundlePrims::handle() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        return bundle->getHandle();
    }
    return BundleHandle{ BundleHandle::invalidValue() };
}
//! Intentionally a no-op for the writable container.
//! NOTE(review): const-side attribute caching appears to be handled by
//! ConstBundlePrims — confirm this override is meant to disable it here.
inline void BundlePrims::ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept
{
}
//! Default constructor creates a detached container; call attach() later.
inline BundlePrims::BundlePrims()
    : ConstBundlePrims()
{
}
//! Convenience constructor: immediately attaches to \p bundle in \p context.
inline BundlePrims::BundlePrims(omni::graph::core::GraphContextObj const& context,
                                omni::graph::core::BundleHandle const& bundle)
    : BundlePrims()
{
    attach(context, bundle);
}
//! Attaches this container to \p bundleHandle within \p context.
//!
//! Acquires the bundle factory from the compute graph interface, resolves
//! the writable bundle object, hands both to the const base, then caches
//! the bundle-level prim-index-offset metadata attribute (if present).
inline void BundlePrims::attach(omni::graph::core::GraphContextObj const& context,
                                omni::graph::core::BundleHandle const& bundleHandle) noexcept
{
    using namespace omni::graph::core;
    ComputeGraph* computeGraph = carb::getCachedInterface<ComputeGraph>();
    omni::core::ObjectPtr<IBundleFactory> factoryPtr = computeGraph->getBundleFactoryInterfacePtr();
    omni::core::ObjectPtr<IBundle2> bundlePtr = factoryPtr->getBundle(context, bundleHandle);
    ConstBundlePrims::attach(std::move(factoryPtr), std::move(bundlePtr));
    IBundle2* bundle = getBundlePtr();
    auto& bundlePrimIndexOffsetDef = detail::getBundlePrimIndexOffsetDefinition();
    m_bundlePrimIndexOffsetAttr = bundle->getBundleMetadataByName(bundlePrimIndexOffsetDef.token);
}
//! Detaches from the bundle: invalidates cached bundle-level attribute
//! handles, then detaches the const base.
inline void BundlePrims::detach() noexcept
{
    using omni::graph::core::AttributeDataHandle;
    //
    // Bundle Level Attributes
    //
    m_bundlePrimIndexOffsetAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    ConstBundlePrims::detach();
}
//! Returns the writable prim at \p primIndex (nullptr when out of range).
//!
//! On first access the prim cache is materialized by the factory lambda:
//! each child bundle carries prim-index metadata used to place it at a
//! stable slot; children without a valid index fill remaining gaps in order.
inline BundlePrim* BundlePrims::getPrim(BundlePrimIndex primIndex) noexcept
{
    using namespace omni::graph::core;
    // Factory invoked by the base class when the prim cache must be (re)built.
    auto createSortedBundlePrims = [this, &bundlePrims = *this]() -> BundlePrimArray
    {
        const size_t childBundleCount = getBundlePtr()->getChildBundleCount();
        std::vector<BundleHandle> handles(childBundleCount);
        getBundlePtr()->getChildBundles(handles.data(), handles.size());
        const GraphContextObj& graphContext = context();
        BundlePrimArray prims(childBundleCount);
        BundlePrimArray nonIndexedPrims; // children lacking a usable prim index
        for (BundleHandle& handle : handles)
        {
            auto childBundle = getBundleFactoryPtr()->getBundle(graphContext, handle);
            BundlePrim* prim = new BundlePrim(bundlePrims, childBundle);
            BundlePrimIndex index = prim->primIndex();
            CARB_ASSERT(index < childBundleCount || index == kInvalidBundlePrimIndex);
            if (index < childBundleCount)
            {
                prims[index].reset(prim);
            }
            else
            {
                nonIndexedPrims.emplace_back(prim);
            }
        }
        // Merge non-indexed prims into the sorted array.
        // Each takes the first still-empty slot, preserving relative order.
        if (!nonIndexedPrims.empty())
        {
            BundlePrimIndex index = 0;
            for (ConstBundlePrimPtr& nonIndexedPrim : nonIndexedPrims)
            {
                while (index < childBundleCount)
                {
                    ConstBundlePrimPtr& prim = prims[index++];
                    if (!prim)
                    {
                        prim = std::move(nonIndexedPrim);
                        break;
                    }
                }
            }
        }
        return prims;
    };
    // Since we acquire BundlePrim instance through BundlePrims interface,
    // we are required to bump dirty id of this prim because intention is to modify it.
    auto bundlePrim = static_cast<BundlePrim*>(ConstBundlePrims::getConstPrim(primIndex, createSortedBundlePrims));
    return bundlePrim;
}
//! Returns the prim at \p primIndex with its contents cleared, or nullptr
//! when the index is out of range.
inline BundlePrim* BundlePrims::getClearedPrim(BundlePrimIndex primIndex) noexcept
{
    BundlePrim* bundlePrim = getPrim(primIndex);
    if(!bundlePrim)
    {
        return nullptr;
    }
    bundlePrim->clearContents();
    return bundlePrim;
}
//! Returns the bundle-level ("common") attributes as a writable prim.
//! The object is stored by the const base; the writable container downcasts it.
inline BundlePrim& BundlePrims::getCommonAttrs() noexcept
{
    return static_cast<BundlePrim&>(ConstBundlePrims::getConstCommonAttrs());
}
//! Returns the attached bundle as a writable interface, or nullptr when the
//! bundle is not writable.
inline omni::graph::core::IBundle2* BundlePrims::getBundlePtr() noexcept
{
    using namespace omni::graph::core;
    return omni::cast<IBundle2>(getConstBundlePtr()).get();
}
//! Removes every prim, every bundle-level attribute, and internal metadata
//! from the attached bundle.
inline void BundlePrims::clearContents() noexcept
{
    // Remove prims back-to-front so each removal's swap-and-pop stays cheap
    // and earlier indices remain valid.
    for (BundlePrimIndex primIndex = getPrimCount(); primIndex != 0;)
    {
        --primIndex;
        removePrim(primIndex);
    }
    // Delete all attributes from this bundle.
    BundlePrim& thisBundle = getCommonAttrs();
    thisBundle.clearContents();
    // remove internal data
    IBundle2* bundle = getBundlePtr();
    if (m_bundlePrimIndexOffsetAttr.isValid())
    {
        const detail::AttrDefinition& attrDef = detail::getBundlePrimIndexOffsetDefinition();
        bundle->removeBundleMetadata(attrDef.token);
        m_bundlePrimIndexOffsetAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }
    // Clearing bundle prims internal attributes such as bundleDirtyID and others causes downstream problems.
    // Initial implementation never cleared those attributes.
#if 0
    auto bundlePrimsInternalAttributes = {
        std::ref(m_bundleDirtyIDAttr), //
        std::ref(m_primIndexAttr), //
    };
    for (auto& internalAttribute : bundlePrimsInternalAttributes)
    {
        if (internalAttribute.get().isValid())
        {
            bundle->removeAttribute(internalAttribute.get());
        }
        internalAttribute.get() = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }
#endif
}
//! Removes \p prim from this container; see the index overload below.
inline bool BundlePrims::removePrim(ConstBundlePrim* prim) noexcept
{
    return removePrim(prim->primIndex());
}
//! Removes the prim at \p primIndex. Returns false when the index does not
//! resolve to a prim.
//!
//! Uses swap-and-pop: the last prim is moved into the vacated slot and its
//! prim-index metadata is rewritten so indices stay dense and consistent.
inline bool BundlePrims::removePrim(BundlePrimIndex primIndex) noexcept
{
    using namespace omni::graph::core;
    IBundle2* bundle = getBundlePtr();
    auto& context = this->context();
    auto& prims = getPrimitives();
    // remove children and attributes
    BundlePrim* childBundlePrim = getPrim(primIndex);
    if (!childBundlePrim)
    {
        return false;
    }
    // clear contents and remove bundle from a map
    childBundlePrim->clearContents();
    bundle->removeChildBundle(childBundlePrim->handle());
    // If removed primitive is not the last one,
    // swap last one with removed one and update index.
    size_t const newPrimCount = prims.size() - 1;
    if (primIndex != newPrimCount)
    {
        prims[primIndex] = std::move(prims[newPrimCount]);
        childBundlePrim = getPrim(primIndex);
        IBundle2* childBundle = childBundlePrim->getBundlePtr();
        CARB_ASSERT(childBundle);
        if (childBundle)
        {
            // Rewrite the moved prim's stored index to its new slot.
            const detail::AttrDefinition& attrDef = detail::getPrimIndexDefinition();
            AttributeDataHandle primIndexAttr = childBundle->getBundleMetadataByName(attrDef.token);
            CARB_ASSERT(primIndexAttr.isValid());
            if (primIndexAttr.isValid())
            {
                *getDataW<uint64_t>(context, primIndexAttr) = primIndex;
            }
        }
    }
    prims.resize(newPrimCount);
    return true;
}
//! Appends \p primCountToAdd new child prims and returns the prim count
//! before the addition (i.e. the index of the first new prim).
//!
//! Child names are generated as "prim" + a monotonically increasing offset
//! stored in bundle metadata, so names stay unique across add/remove cycles.
//! Each child also receives prim-index metadata so every container attached
//! to this bundle sees consistent indices.
inline size_t BundlePrims::addPrims(size_t primCountToAdd) noexcept
{
    using namespace omni::graph::core;
    size_t oldPrimCount = getConstBundlePtr()->getChildBundleCount();
    if (primCountToAdd == 0)
    {
        return oldPrimCount;
    }
    size_t const newPrimCount = oldPrimCount + primCountToAdd;
    CARB_ASSERT(newPrimCount > oldPrimCount);
    IBundle2* bundle = getBundlePtr();
    IBundleFactory* factory = getBundleFactoryPtr();
    auto& context = this->context();
    // Lazily create the bundle-level prim-index-offset metadata on first add.
    if (!m_bundlePrimIndexOffsetAttr.isValid())
    {
        auto& attrDef = detail::getBundlePrimIndexOffsetDefinition();
        m_bundlePrimIndexOffsetAttr = bundle->getBundleMetadataByName(attrDef.token);
        if (!m_bundlePrimIndexOffsetAttr.isValid())
        {
            m_bundlePrimIndexOffsetAttr = bundle->createBundleMetadata(attrDef.token, attrDef.type);
            *getDataW<uint64_t>(context, m_bundlePrimIndexOffsetAttr) = 0;
        }
    }
    uint64_t* bundlePrimIndexOffsetData = getDataW<uint64_t>(context, m_bundlePrimIndexOffsetAttr);
    auto& primIndexDef = detail::getPrimIndexDefinition();
    // Create new child bundles.
    // All children are called 'prim' + primIndex, because IBundle2 interface does not allow sparse hierarchy.
    // Then child paths are stored as an attribute.
    BundlePrimArray& prims = getPrimitives();
    prims.resize(newPrimCount);
    std::string primPathStr;
    for (BundlePrimIndex primIndex = oldPrimCount; primIndex < newPrimCount; ++primIndex)
    {
        primPathStr = "prim" + std::to_string(*bundlePrimIndexOffsetData + primIndex - oldPrimCount);
        NameToken primName = context.iToken->getHandle(primPathStr.data());
        BundleHandle childHandle = bundle->createChildBundle(primName);
        auto childBundle = factory->getBundle(context, childHandle);
        CARB_ASSERT(childBundle);
        // A metadata attribute is created for each child bundle to store its prim index, so that each BundlePrims or
        // ConstBundlePrims instance attached to this bundle can have consistent prim indices.
        if (childBundle)
        {
            AttributeDataHandle primIndexAttr = childBundle->createBundleMetadata(primIndexDef.token, primIndexDef.type);
            *getDataW<uint64_t>(context, primIndexAttr) = primIndex;
        }
        auto newPrim = new BundlePrim(*this, std::move(childBundle));
        prims[primIndex].reset(newPrim);
    }
    *bundlePrimIndexOffsetData += primCountToAdd; // Update prim index offset.
    return oldPrimCount;
}
//! Iterator over all writable prims in this container (begin).
inline BundlePrimIterator BundlePrims::begin() noexcept
{
    return BundlePrimIterator(*this);
}
//! Iterator over all writable prims in this container (end).
inline BundlePrimIterator BundlePrims::end() noexcept
{
    return BundlePrimIterator(*this, getPrimCount());
}
//! Const iteration is provided by the ConstBundlePrims base.
inline ConstBundlePrimIterator BundlePrims::cbegin() noexcept
{
    return ConstBundlePrims::begin();
}
inline ConstBundlePrimIterator BundlePrims::cend() noexcept
{
    return ConstBundlePrims::end();
}
// ====================================================================================================
//
// Bundle Primitive Iterator
//
// ====================================================================================================
//! Forward iterator over the prims of a BundlePrims container; dereference
//! resolves the prim lazily through BundlePrims::getPrim.
inline BundlePrimIterator::BundlePrimIterator(BundlePrims& bundlePrims, BundlePrimIndex primIndex) noexcept
    : m_bundlePrims(&bundlePrims), m_primIndex(primIndex)
{
}
inline bool BundlePrimIterator::operator==(BundlePrimIterator const& that) const noexcept
{
    // Two iterators match only when both container and position match.
    return (m_bundlePrims == that.m_bundlePrims) && (m_primIndex == that.m_primIndex);
}
inline bool BundlePrimIterator::operator!=(BundlePrimIterator const& that) const noexcept
{
    return !operator==(that);
}
inline BundlePrim& BundlePrimIterator::operator*() noexcept
{
    return *operator->();
}
inline BundlePrim* BundlePrimIterator::operator->() noexcept
{
    return m_bundlePrims->getPrim(m_primIndex);
}
inline BundlePrimIterator& BundlePrimIterator::operator++() noexcept
{
    ++m_primIndex;
    return *this;
}
// ====================================================================================================
//
// Bundle Primitive Attribute Iterator
//
// ====================================================================================================
//! Forward iterator over the attributes of a single BundlePrim, wrapping the
//! underlying attribute-map iterator.
inline BundlePrimAttrIterator::BundlePrimAttrIterator(BundlePrim& bundlePrim, BundlePrim::AttrMapIteratorType attrIter) noexcept
    : m_bundlePrim(&bundlePrim), m_attrIter(attrIter)
{
}
inline bool BundlePrimAttrIterator::operator==(BundlePrimAttrIterator const& that) const noexcept
{
    // Equal only when iterating the same prim at the same map position.
    return (m_bundlePrim == that.m_bundlePrim) && (m_attrIter == that.m_attrIter);
}
inline bool BundlePrimAttrIterator::operator!=(BundlePrimAttrIterator const& that) const noexcept
{
    return !operator==(that);
}
//! Read-only access to the current attribute.
inline BundleAttrib const* BundlePrimAttrIterator::getConst() noexcept
{
    CARB_ASSERT(m_bundlePrim != nullptr);
    CARB_ASSERT(m_attrIter->second);
    return m_attrIter->second.get();
}
inline BundleAttrib& BundlePrimAttrIterator::operator*() noexcept
{
    CARB_ASSERT(m_bundlePrim != nullptr);
    CARB_ASSERT(m_attrIter->second);
    return *m_attrIter->second.get();
}
inline BundleAttrib* BundlePrimAttrIterator::operator->() noexcept
{
    CARB_ASSERT(m_bundlePrim != nullptr);
    CARB_ASSERT(m_attrIter->second);
    return m_attrIter->second.get();
}
inline BundlePrimAttrIterator& BundlePrimAttrIterator::operator++() noexcept
{
    ++m_attrIter;
    return *this;
}
} // namespace core
} // namespace graph
} // namespace omni
|
omniverse-code/kit/include/omni/graph/core/PyIBundle.gen.h | // Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
//! Registers the pybind11 bindings for omni::graph::core::IBundle2 on
//! module \p m: the hidden generated parent class plus the public wrapper,
//! with conversion and default-construction initializers.
//! (Build-system generated file — keep edits limited to comments.)
auto bindIBundle2(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    py::class_<omni::core::Generated<omni::graph::core::IBundle2_abi>,
               omni::core::ObjectPtr<omni::core::Generated<omni::graph::core::IBundle2_abi>>,
               omni::core::Api<omni::graph::core::IConstBundle2_abi>>
        clsParent(m, "_IBundle2");
    py::class_<omni::graph::core::IBundle2, omni::core::Generated<omni::graph::core::IBundle2_abi>,
               omni::core::ObjectPtr<omni::graph::core::IBundle2>, omni::core::Api<omni::graph::core::IConstBundle2_abi>>
        cls(m, "IBundle2", R"OMNI_BIND_RAW_(Provide read write access to recursive bundles.)OMNI_BIND_RAW_");
    // Initializer 1: cast an arbitrary IObject to IBundle2 (raises on mismatch).
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::IBundle2>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));
    // Initializer 2: create a fresh IBundle2 via the type factory.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::IBundle2>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::IBundle2 instantiation");
            }
            return tmp;
        }));
    return omni::python::PyBind<omni::graph::core::IBundle2>::bind(cls);
}
|
omniverse-code/kit/include/omni/graph/core/PyIVariable.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
//! Registers the pybind11 enum for omni::graph::core::eVariableScope,
//! exposing the private/read-only/public visibility levels.
//! (Build-system generated file — keep edits limited to comments.)
auto bindeVariableScope(py::module& m)
{
    py::enum_<omni::graph::core::eVariableScope> e(
        m, "eVariableScope", R"OMNI_BIND_RAW_(Scope in which the variable has been made available)OMNI_BIND_RAW_");
    e.value("E_PRIVATE", omni::graph::core::eVariableScope::ePrivate,
            R"OMNI_BIND_RAW_(Variable is accessible only to its graph )OMNI_BIND_RAW_");
    e.value("E_READ_ONLY", omni::graph::core::eVariableScope::eReadOnly,
            R"OMNI_BIND_RAW_(Variable can be read by other graphs )OMNI_BIND_RAW_");
    e.value("E_PUBLIC", omni::graph::core::eVariableScope::ePublic,
            R"OMNI_BIND_RAW_(Variable can be read/written by other graphs )OMNI_BIND_RAW_");
    return e;
}
//! Registers the pybind11 bindings for omni::graph::core::IVariable on
//! module \p m: hidden generated parent, public wrapper, conversion and
//! factory initializers, plus Python properties mapping to getters/setters.
//! (Build-system generated file — keep edits limited to comments.)
auto bindIVariable(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    py::class_<omni::core::Generated<omni::graph::core::IVariable_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::IVariable_abi>>, omni::core::IObject>
        clsParent(m, "_IVariable");
    py::class_<omni::graph::core::IVariable, omni::core::Generated<omni::graph::core::IVariable_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::core::IVariable>, omni::core::IObject>
        cls(m, "IVariable",
            R"OMNI_BIND_RAW_(Object that contains a value that is local to a graph, available from anywhere in the graph)OMNI_BIND_RAW_");
    // Initializer 1: cast an arbitrary IObject to IVariable (raises on mismatch).
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::IVariable>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));
    // Initializer 2: create a fresh IVariable via the type factory.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::IVariable>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::IVariable instantiation");
            }
            return tmp;
        }));
    // Read-only and read/write properties forwarding to the C++ accessors.
    cls.def_property_readonly("name", &omni::graph::core::IVariable::getName);
    cls.def_property_readonly("source_path", &omni::graph::core::IVariable::getSourcePath);
    cls.def_property("category", &omni::graph::core::IVariable::getCategory,
                     [](omni::graph::core::IVariable* self, const char* category) { self->setCategory(category); });
    cls.def_property("display_name", &omni::graph::core::IVariable::getDisplayName,
                     [](omni::graph::core::IVariable* self, const char* displayName)
                     { self->setDisplayName(displayName); });
    cls.def_property("tooltip", &omni::graph::core::IVariable::getTooltip,
                     [](omni::graph::core::IVariable* self, const char* toolTip) { self->setTooltip(toolTip); });
    cls.def_property("scope", &omni::graph::core::IVariable::getScope, &omni::graph::core::IVariable::setScope);
    cls.def_property_readonly("valid", &omni::graph::core::IVariable::isValid);
    return omni::python::PyBind<omni::graph::core::IVariable>::bind(cls);
}
|
Subsets and Splits