Dataset columns: file_path (string, 20–202 chars), content (string, 9–3.85M chars), size (int64, 9–3.85M), lang (string, 9 classes), avg_line_length (float64, 3.33–100), max_line_length (int64, 8–993), alphanum_fraction (float64, 0.26–0.93).
leggedrobotics/viplanner/viplanner/config/costmap_cfg.py
# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

# python
import os
from dataclasses import dataclass
from typing import Optional

import yaml


class Loader(yaml.SafeLoader):
    pass


def construct_GeneralCostMapConfig(loader, node):
    return GeneralCostMapConfig(**loader.construct_mapping(node))


Loader.add_constructor(
    "tag:yaml.org,2002:python/object:viplanner.config.costmap_cfg.GeneralCostMapConfig",
    construct_GeneralCostMapConfig,
)


def construct_ReconstructionCfg(loader, node):
    return ReconstructionCfg(**loader.construct_mapping(node))


Loader.add_constructor(
    "tag:yaml.org,2002:python/object:viplanner.config.costmap_cfg.ReconstructionCfg",
    construct_ReconstructionCfg,
)


def construct_SemCostMapConfig(loader, node):
    return SemCostMapConfig(**loader.construct_mapping(node))


Loader.add_constructor(
    "tag:yaml.org,2002:python/object:viplanner.config.costmap_cfg.SemCostMapConfig",
    construct_SemCostMapConfig,
)


def construct_TsdfCostMapConfig(loader, node):
    return TsdfCostMapConfig(**loader.construct_mapping(node))


Loader.add_constructor(
    "tag:yaml.org,2002:python/object:viplanner.config.costmap_cfg.TsdfCostMapConfig",
    construct_TsdfCostMapConfig,
)


@dataclass
class ReconstructionCfg:
    """
    Arguments for 3D reconstruction using depth maps
    """

    # directory where the environment with the depth (and semantic) images is located
    data_dir: str = "${USER_PATH_TO_DATA}"
    # environment name
    env: str = "town01"
    # image suffix
    depth_suffix = "_cam0"
    sem_suffix = "_cam1"
    # higher-resolution depth images available for reconstruction (i.e. the depth images are also taken by the semantic camera)
    high_res_depth: bool = False

    # reconstruction parameters
    voxel_size: float = 0.05  # [m] 0.05 for matterport, 0.1 for carla
    start_idx: int = 0  # start index for reconstruction
    max_images: Optional[int] = 1000  # maximum number of images to reconstruct; if None, all images are used
    depth_scale: float = 1000.0  # depth scale factor

    # semantic reconstruction
    semantics: bool = True

    # speed vs. memory trade-off parameters
    point_cloud_batch_size: int = (
        200  # 3d points of nbr images added to point cloud at once (higher values use more memory but are faster)
    )

    """ Internal functions """

    def get_data_path(self) -> str:
        return os.path.join(self.data_dir, self.env)

    def get_out_path(self) -> str:
        # NOTE: relies on an `out_dir` attribute being set on the instance (not defined above)
        return os.path.join(self.out_dir, self.env)


@dataclass
class SemCostMapConfig:
    """Configuration for the semantic cost map"""

    # point-cloud filter parameters
    ground_height: Optional[float] = -0.5  # None for matterport, -0.5 for carla, -1.0 for nomoko
    robot_height: float = 0.70
    robot_height_factor: float = 3.0
    nb_neighbors: int = 100
    std_ratio: float = 2.0  # keep high, otherwise ground will be removed
    downsample: bool = False
    # smoothing
    nb_neigh: int = 15
    change_decimal: int = 3
    conv_crit: float = (
        0.45  # ratio of points that have to change by at least the `change_decimal` decimal value to converge
    )
    nb_tasks: Optional[int] = 10  # number of tasks for parallel processing; if None, all available cores are used
    sigma_smooth: float = 2.5
    max_iterations: int = 1
    # obstacle threshold (multiplied with the highest loss value defined for a semantic class)
    obstacle_threshold: float = 0.8  # 0.5/0.6 for matterport, 0.8 for carla
    # negative reward for space with the smallest cost (introduces a gradient in areas with the smallest loss value, steering towards the center)
    # NOTE: at the end, the cost map is elevated by that amount to ensure that the smallest cost is 0
    negative_reward: float = 0.5
    # loss values rounded to `round_decimal_traversable` decimals that equal 0.0 are selected, and the traversable gradient is determined based on them
    round_decimal_traversable: int = 2
    # compute height map
    compute_height_map: bool = False  # False for matterport, True for carla and nomoko


@dataclass
class TsdfCostMapConfig:
    """Configuration for the tsdf cost map"""

    # offset of the point cloud
    offset_z: float = 0.0
    # filter parameters
    ground_height: float = 0.35
    robot_height: float = 0.70
    robot_height_factor: float = 2.0
    nb_neighbors: int = 50
    std_ratio: float = 0.2
    filter_outliers: bool = True
    # dilation parameters
    sigma_expand: float = 2.0
    obstacle_threshold: float = 0.01
    free_space_threshold: float = 0.5


@dataclass
class GeneralCostMapConfig:
    """General cost map configuration"""

    # path to point cloud
    root_path: str = "town01"
    ply_file: str = "cloud.ply"
    # resolution of the cost map
    resolution: float = 0.1  # [m] (0.04 for matterport, 0.1 for carla)
    # map parameters
    clear_dist: float = 1.0  # cost map expansion over the point cloud space (prevents paths from going out of the map)
    # smoothing parameters
    sigma_smooth: float = 3.0
    # cost map expansion
    x_min: Optional[float] = -8.05  # [m] if None, the minimum of the point cloud is used (carla town01: -8.05, matterport: None)
    y_min: Optional[float] = -8.05  # [m] if None, the minimum of the point cloud is used (carla town01: -8.05, matterport: None)
    x_max: Optional[float] = 346.22  # [m] if None, the maximum of the point cloud is used (carla town01: 346.22, matterport: None)
    y_max: Optional[float] = 336.65  # [m] if None, the maximum of the point cloud is used (carla town01: 336.65, matterport: None)


@dataclass
class CostMapConfig:
    """Overall cost map configuration"""

    # cost map domains
    semantics: bool = True
    geometry: bool = False

    # name
    map_name: str = "cost_map_sem"

    # general cost map configuration
    general: GeneralCostMapConfig = GeneralCostMapConfig()

    # individual cost map configurations
    sem_cost_map: SemCostMapConfig = SemCostMapConfig()
    tsdf_cost_map: TsdfCostMapConfig = TsdfCostMapConfig()

    # visualize cost map
    visualize: bool = True

    # FILLED BY CODE -> DO NOT CHANGE ###
    x_start: Optional[float] = None
    y_start: Optional[float] = None


# EoF
size: 6,332 · lang: Python · avg_line_length: 30.984848 · max_line_length: 149 · alphanum_fraction: 0.695515
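The custom Loader above only registers constructors for the nested config dataclasses, which is what is needed to read back configs that were written with a plain yaml.dump. Below is a minimal sketch of that round trip (not part of the repository); it assumes the package is importable as viplanner.config.costmap_cfg.

import yaml

from viplanner.config.costmap_cfg import GeneralCostMapConfig, Loader

# yaml.dump with the default (unsafe) Dumper tags the object as
# !!python/object:viplanner.config.costmap_cfg.GeneralCostMapConfig
general_cfg = GeneralCostMapConfig(root_path="town01", resolution=0.1)
dumped = yaml.dump(general_cfg)

# the registered constructor turns the tagged mapping back into the dataclass,
# while still going through a SafeLoader subclass
restored = yaml.load(dumped, Loader=Loader)
assert isinstance(restored, GeneralCostMapConfig)
assert restored.resolution == 0.1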
leggedrobotics/viplanner/viplanner/config/coco_sem_meta.py
# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab) # Author: Pascal Roth # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # Modified from https://github.com/google-research/deeplab2/blob/main/data/coco_constants.py # File containing the meta info of all classes from the COCO dataset. COCO_CATEGORIES = [ {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": 
"banana"}, {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, {"color": [106, 154, 176], "isthing": 0, "id": 145, 
"name": "playingfield"}, {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, ] _COCO_MAPPING = { "road": ["road"], "sidewalk": [ "pavement-merged", ], "floor": [ "floor-other-merged", "floor-wood", "platform", "playingfield", "rug-merged", ], "gravel": [ "gravel", ], "stairs": [ "stairs", ], "sand": [ "sand", ], "snow": [ "snow", ], "person": ["person"], "anymal": [ "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", ], "vehicle": [ "car", "bus", "truck", "boat", ], "on_rails": [ "train", "railroad", ], "motorcycle": [ "motorcycle", ], "bicycle": [ "bicycle", ], "building": [ "building-other-merged", "house", "roof", ], "wall": [ "wall-other-merged", "curtain", "mirror-stuff", "wall-brick", "wall-stone", "wall-tile", "wall-wood", "window-blind", "window-other", ], "fence": [ "fence-merged", ], "bridge": [ "bridge", ], "pole": [ "fire hydrant", "parking meter", ], "traffic_sign": [ "stop sign", ], "traffic_light": [ "traffic light", ], "bench": [ 
"bench", ], "vegetation": [ "potted plant", "flower", "tree-merged", "mountain-merged", "rock-merged", ], "terrain": [ "grass-merged", "dirt-merged", ], "water_surface": [ "river", "sea", "water-other", ], "sky": [ "sky-other-merged", "airplane", ], "dynamic": [ "backpack", "umbrella", "handbag", "tie", "suitcase", "book", # sports "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", # kitchen "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "microwave", "oven", "toaster", "sink", "refrigerator", # food "banana", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "fruit", "food-other-merged", "apple", # computer hardware "mouse", "remote", "keyboard", "cell phone", "laptop", # other "scissors", "teddy bear", "hair drier", "toothbrush", "net", "paper-merged", ], "static": [ "banner", "cardboard", "light", "tent", "unknown", ], "furniture": [ "chair", "couch", "bed", "dining table", "toilet", "clock", "vase", "blanket", "pillow", "shelf", "cabinet", "table-merged", "counter", "tv", ], "door": [ "door-stuff", ], "ceiling": ["ceiling-merged"], "indoor_soft": [ "towel", ], } def get_class_for_id(): id_to_class = {} for idx, id_dict in enumerate(COCO_CATEGORIES): success = False for class_name, keywords in _COCO_MAPPING.items(): if any(keyword in id_dict["name"] for keyword in keywords): id_to_class[idx] = class_name success = True break if not success: print("No mapping found for {}".format(id_dict["name"])) return id_to_class def get_class_for_id_mmdet(class_list: list): id_to_class = {} for idx, coco_class_name in enumerate(class_list): success = False for class_name, keywords in _COCO_MAPPING.items(): if any(keyword in coco_class_name for keyword in keywords): id_to_class[idx] = class_name success = True break if not success: print("No mapping found for {}".format(coco_class_name["name"])) return id_to_class if __name__ == "__main__": print(get_class_for_id())
size: 14,752 · lang: Python · avg_line_length: 38.341333 · max_line_length: 92 · alphanum_fraction: 0.46814
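get_class_for_id keys the returned dictionary by the position of each entry in COCO_CATEGORIES (not by the COCO "id" field), so it is meant for predictions that use the contiguous panoptic index order. A small hedged sketch of applying the mapping to a dummy prediction mask follows; the mask values and shape are made up.

import numpy as np

from viplanner.config.coco_sem_meta import get_class_for_id

idx_to_class = get_class_for_id()  # {index in COCO_CATEGORIES: viplanner class name}

# dummy per-pixel predictions drawn from the mapped indices
mapped_indices = np.array(list(idx_to_class.keys()))
pred = np.random.choice(mapped_indices, size=(480, 640))

# collapse fine-grained COCO classes into the coarser viplanner semantic classes
lookup = np.vectorize(idx_to_class.get)
viplanner_classes = lookup(pred)
print(np.unique(viplanner_classes))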
leggedrobotics/viplanner/viplanner/utils/dataset.py
# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab) # Author: Pascal Roth # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import copy import math # python import os import random import shutil from pathlib import Path from random import sample from typing import Dict, List, Optional, Tuple import cv2 import networkx as nx import numpy as np import open3d as o3d import PIL import pypose as pp import scipy.spatial.transform as tf import torch import torch.nn.functional as F import torchvision.transforms as transforms from PIL import Image from scipy.spatial.kdtree import KDTree from skimage.util import random_noise from torch.utils.data import Dataset from tqdm import tqdm # implerative-planner-learning from viplanner.config import DataCfg from viplanner.cost_maps import CostMapPCD # set default dtype to float32 torch.set_default_dtype(torch.float32) class PlannerData(Dataset): def __init__( self, cfg: DataCfg, transform, semantics: bool = False, rgb: bool = False, pixel_mean: Optional[np.ndarray] = None, pixel_std: Optional[np.ndarray] = None, ) -> None: """_summary_ Args: cfg (DataCfg): Dataset COnfiguration transform (_type_): Compose torchvision transforms (resize and to tensor) semantics (bool, optional): If semantics are used in the network input. Defaults to False. """ self._cfg = cfg self.transform = transform self.semantics = semantics self.rgb = rgb assert not (semantics and rgb), "Semantics and RGB cannot be used at the same time" self.pixel_mean = pixel_mean self.pixel_std = pixel_std # vertical flip transform self.flip_transform = transforms.RandomHorizontalFlip(p=1.0) # init buffers self.depth_filename: List[str] = [] self.sem_rgb_filename: List[str] = [] self.depth_imgs: List[torch.Tensor] = [] self.sem_imgs: List[torch.Tensor] = [] self.odom: torch.Tensor = None self.goal: torch.Tensor = None self.pair_augment: np.ndarray = None self.fov_angle: float = 0.0 self.load_ram: bool = False return def update_buffers( self, depth_filename: List[str], sem_rgb_filename: List[str], odom: torch.Tensor, goal: torch.Tensor, pair_augment: np.ndarray, ) -> None: self.depth_filename = depth_filename self.sem_rgb_filename = sem_rgb_filename self.odom = odom self.goal = goal self.pair_augment = pair_augment return def set_fov(self, fov_angle): self.fov_angle = fov_angle return """Augment Images with black polygons""" def _add_random_polygons(self, image, nb_polygons, max_size): for i in range(nb_polygons): num_corners = random.randint(10, 20) polygon_points = np.random.randint(0, max_size, size=(num_corners, 2)) x_offset = np.random.randint(0, image.shape[0]) y_offset = np.random.randint(0, image.shape[1]) polygon_points[:, 0] += x_offset polygon_points[:, 1] += y_offset # Create a convex hull from the points hull = cv2.convexHull(polygon_points) # Draw the hull on the image cv2.fillPoly(image, [hull], (0, 0, 0)) return image """Load images""" def load_data_in_memory(self) -> None: """Load data into RAM to speed up training""" for idx in tqdm(range(len(self.depth_filename)), desc="Load images into RAM"): self.depth_imgs.append(self._load_depth_img(idx)) if self.semantics or self.rgb: self.sem_imgs.append(self._load_sem_rgb_img(idx)) self.load_ram = True return def _load_depth_img(self, idx) -> torch.Tensor: if self.depth_filename[idx].endswith(".png"): depth_image = Image.open(self.depth_filename[idx]) if self._cfg.real_world_data: depth_image = np.array(depth_image.transpose(PIL.Image.ROTATE_180)) else: depth_image = np.array(depth_image) else: depth_image = 
np.load(self.depth_filename[idx]) depth_image[~np.isfinite(depth_image)] = 0.0 depth_image = (depth_image / 1000.0).astype("float32") depth_image[depth_image > self._cfg.max_depth] = 0.0 # add noise to depth image if self._cfg.depth_salt_pepper or self._cfg.depth_gaussian: depth_norm = (depth_image - np.min(depth_image)) / (np.max(depth_image) - np.min(depth_image)) if self._cfg.depth_salt_pepper: depth_norm = random_noise( depth_norm, mode="s&p", amount=self._cfg.depth_salt_pepper, clip=False, ) if self._cfg.depth_gaussian: depth_norm = random_noise( depth_norm, mode="gaussian", mean=0, var=self._cfg.depth_gaussian, clip=False, ) depth_image = depth_norm * (np.max(depth_image) - np.min(depth_image)) + np.min(depth_image) if self._cfg.depth_random_polygons_nb and self._cfg.depth_random_polygons_nb > 0: depth_image = self._add_random_polygons( depth_image, self._cfg.depth_random_polygons_nb, self._cfg.depth_random_polygon_size, ) # transform depth image depth_image = self.transform(depth_image).type(torch.float32) if self.pair_augment[idx]: depth_image = self.flip_transform.forward(depth_image) return depth_image def _load_sem_rgb_img(self, idx) -> torch.Tensor: image = Image.open(self.sem_rgb_filename[idx]) if self._cfg.real_world_data: image = np.array(image.transpose(PIL.Image.ROTATE_180)) else: image = np.array(image) # normalize image if self.pixel_mean is not None and self.pixel_std is not None: image = (image - self.pixel_mean) / self.pixel_std # add noise to semantic image if self._cfg.sem_rgb_black_img: if random.randint(0, 99) < self._cfg.sem_rgb_black_img * 100: image = np.zeros_like(image) if self._cfg.sem_rgb_pepper: image = random_noise( image, mode="pepper", amount=self._cfg.depth_salt_pepper, clip=False, ) if self._cfg.sem_rgb_random_polygons_nb and self._cfg.sem_rgb_random_polygons_nb > 0: image = self._add_random_polygons( image, self._cfg.sem_rgb_random_polygons_nb, self._cfg.sem_rgb_random_polygon_size, ) # transform semantic image image = self.transform(image).type(torch.float32) assert image.round(decimals=1).max() <= 1.0, ( f"Image '{self.sem_rgb_filename[idx]}' is not normalized with max" f" value {image.max().item()}" ) if self.pair_augment[idx]: image = self.flip_transform.forward(image) return image """Get image in training""" def __len__(self): return len(self.depth_filename) def __getitem__(self, idx): """ Get batch items Returns: - depth_image: depth image - sem_rgb_image: semantic image - odom: odometry of the start pose (point and rotation) - goal: goal point in the camera frame - pair_augment: bool if the pair is augmented (flipped at the y-axis of the image) """ # get depth image if self.load_ram: depth_image = self.depth_imgs[idx] if self.semantics or self.rgb: sem_rgb_image = self.sem_imgs[idx] else: sem_rgb_image = 0 else: depth_image = self._load_depth_img(idx) if self.semantics or self.rgb: sem_rgb_image = self._load_sem_rgb_img(idx) else: sem_rgb_image = 0 return ( depth_image, sem_rgb_image, self.odom[idx], self.goal[idx], self.pair_augment[idx], ) class DistanceSchemeIdx: def __init__(self, distance: float) -> None: self.distance: float = distance self.odom_list: List[pp.LieTensor] = [] self.goal_list: List[pp.LieTensor] = [] self.pair_within_fov: List[bool] = [] self.pair_front_of_robot: List[bool] = [] self.pair_behind_robot: List[bool] = [] self.depth_img_list: List[str] = [] self.sem_rgb_img_list: List[str] = [] # flags self.has_data: bool = False return def update_buffers( self, odom: pp.LieTensor, goal: pp.LieTensor, within_fov: bool = False, 
front_of_robot: bool = False, behind_robot: bool = False, depth_filename: str = None, sem_rgb_filename: str = None, ) -> None: self.odom_list.append(odom) self.goal_list.append(goal) self.pair_within_fov.append(within_fov) self.pair_front_of_robot.append(front_of_robot) self.pair_behind_robot.append(behind_robot) self.depth_img_list.append(depth_filename) self.sem_rgb_img_list.append(sem_rgb_filename) self.has_data = len(self.odom_list) > 0 return def get_data( self, nb_fov: int, nb_front: int, nb_back: int, augment: bool = True, ) -> Tuple[List[pp.LieTensor], List[pp.LieTensor], List[str], List[str], np.ndarray,]: assert self.has_data, f"DistanceSchemeIdx for distance {self.distance} has no data" # get all pairs that are within the fov idx_fov = np.where(self.pair_within_fov)[0] idx_front = np.where(self.pair_front_of_robot)[0] idx_back = np.where(self.pair_behind_robot)[0] idx_augment = [] # augment pairs if not enough if len(idx_fov) == 0: print(f"[WARNING] for distance {self.distance} no 'within_fov'" " samples") idx_fov = np.array([], dtype=np.int64) elif len(idx_fov) < nb_fov: print( f"[INFO] for distance {self.distance} not enough 'within_fov'" f" samples ({len(idx_fov)} instead of {nb_fov})" ) if augment: idx_augment.append( np.random.choice( idx_fov, min(len(idx_fov), nb_fov - len(idx_fov)), replace=(nb_fov - len(idx_fov) > len(idx_fov)), ) ) else: idx_fov = np.random.choice(idx_fov, len(idx_fov), replace=False) else: idx_fov = np.random.choice(idx_fov, nb_fov, replace=False) if len(idx_front) == 0: print(f"[WARNING] for distance {self.distance} no 'front_of_robot'" " samples") idx_front = np.array([], dtype=np.int64) elif len(idx_front) < nb_front: print( f"[INFO] for distance {self.distance} not enough" f" 'front_of_robot' samples ({len(idx_front)} instead of" f" {nb_front})" ) if augment: idx_augment.append( np.random.choice( idx_front, min(len(idx_front), nb_front - len(idx_front)), replace=(nb_front - len(idx_front) > len(idx_front)), ) ) else: idx_front = np.random.choice(idx_front, len(idx_front), replace=False) else: idx_front = np.random.choice(idx_front, nb_front, replace=False) if len(idx_back) == 0: print(f"[WARNING] for distance {self.distance} no 'behind_robot'" " samples") idx_back = np.array([], dtype=np.int64) elif len(idx_back) < nb_back: print( f"[INFO] for distance {self.distance} not enough" f" 'behind_robot' samples ({len(idx_back)} instead of" f" {nb_back})" ) if augment: idx_augment.append( np.random.choice( idx_back, min(len(idx_back), nb_back - len(idx_back)), replace=(nb_back - len(idx_back) > len(idx_back)), ) ) else: idx_back = np.random.choice(idx_back, len(idx_back), replace=False) else: idx_back = np.random.choice(idx_back, nb_back, replace=False) idx = np.hstack([idx_fov, idx_front, idx_back]) # stack buffers odom = torch.stack(self.odom_list) goal = torch.stack(self.goal_list) # get pairs if idx_augment: idx_augment = np.hstack(idx_augment) odom = torch.vstack([odom[idx], odom[idx_augment]]) goal = torch.vstack( [ goal[idx], goal[idx_augment].tensor() * torch.tensor([[1, -1, 1, 1, 1, 1, 1]]), ] ) depth_img_list = [self.depth_img_list[j] for j in idx.tolist()] + [ self.depth_img_list[i] for i in idx_augment.tolist() ] sem_rgb_img_list = [self.sem_rgb_img_list[j] for j in idx.tolist()] + [ self.sem_rgb_img_list[i] for i in idx_augment.tolist() ] augment = np.hstack([np.zeros(len(idx)), np.ones(len(idx_augment))]) return odom, goal, depth_img_list, sem_rgb_img_list, augment else: return ( odom[idx], goal[idx], [self.depth_img_list[j] for j in 
idx.tolist()], [self.sem_rgb_img_list[j] for j in idx.tolist()], np.zeros(len(idx)), ) class PlannerDataGenerator(Dataset): debug = False mesh_size = 0.5 def __init__( self, cfg: DataCfg, root: str, semantics: bool = False, rgb: bool = False, cost_map: CostMapPCD = None, ) -> None: print( f"[INFO] PlannerDataGenerator init with semantics={semantics}," f" rgb={rgb} for ENV {os.path.split(root)[-1]}" ) # super().__init__() # set parameters self._cfg = cfg self.root = root self.cost_map = cost_map self.semantics = semantics self.rgb = rgb assert not (self.semantics and self.rgb), "semantics and rgb cannot be true at the same time" # init list for final odom, goal and img mapping self.depth_filename_list = [] self.sem_rgb_filename_list = [] self.odom_depth: torch.Tensor = None self.goal: torch.Tensor = None self.pair_outside: np.ndarray = None self.pair_difficult: np.ndarray = None self.pair_augment: np.ndarray = None self.pair_within_fov: np.ndarray = None self.pair_front_of_robot: np.ndarray = None self.odom_array_sem_rgb: pp.LieTensor = None self.odom_array_depth: pp.LieTensor = None self.odom_used: int = 0 self.odom_no_suitable_goals: int = 0 # set parameters self._device = "cuda:0" if torch.cuda.is_available() else "cpu" # get odom data and filter self.load_odom() self.filter_obs_inflation() # noise edges in depth image --> real world Realsense difficulties along edges if self._cfg.noise_edges: self.noise_edges() # find odom-goal pairs self.get_odom_goal_pairs() return """LOAD HELPER FUNCTIONS""" def load_odom(self) -> None: print("[INFO] Loading odom data...", end=" ") # load odom of every image odom_path = os.path.join(self.root, f"camera_extrinsic{self._cfg.depth_suffix}.txt") odom_np = np.loadtxt(odom_path, delimiter=",") self.odom_array_depth = pp.SE3(odom_np) if self.semantics or self.rgb: odom_path = os.path.join(self.root, f"camera_extrinsic{self._cfg.sem_suffix}.txt") odom_np = np.loadtxt(odom_path, delimiter=",") self.odom_array_sem_rgb = pp.SE3(odom_np) if self.debug: # plot odom small_sphere = o3d.geometry.TriangleMesh.create_sphere(self.mesh_size / 3.0) # successful trajectory points small_sphere.paint_uniform_color([0.4, 1.0, 0.1]) odom_vis_list = [] for i in range(len(self.odom_array_depth)): odom_vis_list.append( copy.deepcopy(small_sphere).translate( ( self.odom_array_depth[i, 0], self.odom_array_depth[i, 1], self.odom_array_depth[i, 2], ) ) ) odom_vis_list.append(self.cost_map.pcd_tsdf) o3d.visualization.draw_geometries(odom_vis_list) print("DONE!") return def load_images(self, root_path, domain: str = "depth"): img_path = os.path.join(root_path, domain) assert os.path.isdir(img_path), f"Image directory path '{img_path}' does not exist for domain" f" {domain}" assert len(os.listdir(img_path)) > 0, f"Image directory '{img_path}' is empty for domain {domain}" # use the more precise npy files if available img_filename_list = [str(s) for s in Path(img_path).rglob("*.npy")] if len(img_filename_list) == 0: img_filename_list = [str(s) for s in Path(img_path).rglob("*.png")] if domain == "depth": img_filename_list.sort(key=lambda x: int(x.split("/")[-1][: -(4 + len(self._cfg.depth_suffix))])) else: img_filename_list.sort(key=lambda x: int(x.split("/")[-1][: -(4 + len(self._cfg.sem_suffix))])) return img_filename_list """FILTER HELPER FUNCTIONS""" def filter_obs_inflation(self) -> None: """ Filter odom points within the inflation range of the obstacles in the cost map. 
Filtering only performed according to the position of the depth camera, due to the close position of depth and semantic camera. """ print( ("[INFO] Filter odom points within the inflation range of the" " obstacles in the cost map..."), end="", ) norm_inds, _ = self.cost_map.Pos2Ind(self.odom_array_depth[:, None, :3]) cost_grid = self.cost_map.cost_array.T.expand(self.odom_array_depth.shape[0], 1, -1, -1) norm_inds = norm_inds.to(cost_grid.device) oloss_M = ( F.grid_sample( cost_grid, norm_inds[:, None, :, :], mode="bicubic", padding_mode="border", align_corners=False, ) .squeeze(1) .squeeze(1) ) oloss_M = oloss_M.to(torch.float32).to("cpu") if self.semantics or self.rgb: points_free_space = oloss_M < self._cfg.obs_cost_height + abs( self.cost_map.cfg.sem_cost_map.negative_reward ) else: points_free_space = oloss_M < self._cfg.obs_cost_height if self._cfg.carla: # for CARLA filter large open spaces # Extract the x and y coordinates from the odom poses x_coords = self.odom_array_depth.tensor()[:, 0] y_coords = self.odom_array_depth.tensor()[:, 1] # Filter the point cloud based on the square coordinates mask_area_1 = (y_coords >= 100.5) & (y_coords <= 325.5) & (x_coords >= 208.9) & (x_coords <= 317.8) mask_area_2 = (y_coords >= 12.7) & (y_coords <= 80.6) & (x_coords >= 190.3) & (x_coords <= 315.8) mask_area_3 = (y_coords >= 10.0) & (y_coords <= 80.0) & (x_coords >= 123.56) & (x_coords <= 139.37) combined_mask = mask_area_1 | mask_area_2 | mask_area_3 | ~points_free_space.squeeze(1) points_free_space = (~combined_mask).unsqueeze(1) if self.debug: # plot odom odom_vis_list = [] small_sphere = o3d.geometry.TriangleMesh.create_sphere(self.mesh_size / 3.0) # successful trajectory points for i in range(len(self.odom_array_depth)): if round(oloss_M[i].item(), 3) == 0.0: small_sphere.paint_uniform_color([0.4, 0.1, 1.0]) # violette elif points_free_space[i]: small_sphere.paint_uniform_color([0.4, 1.0, 0.1]) # green else: small_sphere.paint_uniform_color([1.0, 0.4, 0.1]) # red if self.semantics or self.rgb: z_height = self.odom_array_depth.tensor()[i, 2] + abs( self.cost_map.cfg.sem_cost_map.negative_reward ) else: z_height = self.odom_array_depth.tensor()[i, 2] odom_vis_list.append( copy.deepcopy(small_sphere).translate( ( self.odom_array_depth.tensor()[i, 0], self.odom_array_depth.tensor()[i, 1], z_height, ) ) ) odom_vis_list.append(self.cost_map.pcd_tsdf) o3d.visualization.draw_geometries(odom_vis_list) nb_odom_point_prev = len(self.odom_array_depth) self.odom_array_depth = self.odom_array_depth[points_free_space.squeeze()] self.nb_odom_points = self.odom_array_depth.shape[0] # load depth image files as name list depth_filename_list = self.load_images(self.root, "depth") self.depth_filename_list = [ depth_filename_list[i] for i in range(len(depth_filename_list)) if points_free_space[i] ] if self.semantics: self.odom_array_sem_rgb = self.odom_array_sem_rgb[points_free_space.squeeze()] sem_rgb_filename_list = self.load_images(self.root, "semantics") self.sem_rgb_filename_list = [ sem_rgb_filename_list[i] for i in range(len(sem_rgb_filename_list)) if points_free_space[i] ] elif self.rgb: self.odom_array_sem_rgb = self.odom_array_sem_rgb[points_free_space.squeeze()] sem_rgb_filename_list = self.load_images(self.root, "rgb") self.sem_rgb_filename_list = [ sem_rgb_filename_list[i] for i in range(len(sem_rgb_filename_list)) if points_free_space[i] ] assert len(self.depth_filename_list) != 0, "No depth images left after filtering" print("DONE!") print( "[INFO] odom points outside obs inflation :" f" 
\t{self.nb_odom_points} ({round(self.nb_odom_points/nb_odom_point_prev*100, 2)} %)" ) return """GENERATE SAMPLES""" def get_odom_goal_pairs(self) -> None: # get fov self.get_intrinscs_and_fov() # construct graph self.get_graph() # get pairs self.get_pairs() # free up memory self.odom_array_depth = self.odom_array_sem_rgb = None return def compute_ratios(self) -> Tuple[float, float, float]: # ratio of general samples distribution num_within_fov = self.odom_depth[self.pair_within_fov].shape[0] ratio_fov = num_within_fov / self.odom_depth.shape[0] ratio_front = np.sum(self.pair_front_of_robot) / self.odom_depth.shape[0] ratio_back = 1 - ratio_front - ratio_fov # samples ratios within fov samples num_easy = ( num_within_fov - self.pair_difficult[self.pair_within_fov].sum().item() - self.pair_outside[self.pair_within_fov].sum().item() ) ratio_easy = num_easy / num_within_fov ratio_hard = self.pair_difficult[self.pair_within_fov].sum().item() / num_within_fov ratio_outside = self.pair_outside[self.pair_within_fov].sum().item() / num_within_fov return ( ratio_fov, ratio_front, ratio_back, ratio_easy, ratio_hard, ratio_outside, ) def get_intrinscs_and_fov(self) -> None: # load intrinsics intrinsic_path = os.path.join(self.root, "intrinsics.txt") P = np.loadtxt(intrinsic_path, delimiter=",") # assumes ROS P matrix self.K_depth = P[0].reshape(3, 4)[:3, :3] self.K_sem_rgb = P[1].reshape(3, 4)[:3, :3] self.alpha_fov = 2 * math.atan(self.K_depth[0, 0] / self.K_depth[0, 2]) return def get_graph(self) -> None: num_connections = 3 num_intermediate = 3 # get occpuancy map from tsdf map cost_array = self.cost_map.tsdf_array.cpu().numpy() if self.semantics or self.rgb: occupancy_map = ( cost_array > self._cfg.obs_cost_height + abs(self.cost_map.cfg.sem_cost_map.negative_reward) ).astype(np.uint8) else: occupancy_map = (cost_array > self._cfg.obs_cost_height).astype(np.uint8) # construct kdtree to find nearest neighbors of points odom_points = self.odom_array_depth.data[:, :2].data.cpu().numpy() kdtree = KDTree(odom_points) _, nearest_neighbors_idx = kdtree.query(odom_points, k=num_connections + 1, workers=-1) # remove first neighbor as it is the point itself nearest_neighbors_idx = nearest_neighbors_idx[:, 1:] # define origin and neighbor points origin_point = np.repeat(odom_points, repeats=num_connections, axis=0) neighbor_points = odom_points[nearest_neighbors_idx, :].reshape(-1, 2) # interpolate points between origin and neighbor points x_interp = ( origin_point[:, None, 0] + (neighbor_points[:, 0] - origin_point[:, 0])[:, None] * np.linspace(0, 1, num=num_intermediate + 1, endpoint=False)[1:] ) y_interp = ( origin_point[:, None, 1] + (neighbor_points[:, 1] - origin_point[:, 1])[:, None] * np.linspace(0, 1, num=num_intermediate + 1, endpoint=False)[1:] ) inter_points = np.stack((x_interp.reshape(-1), y_interp.reshape(-1)), axis=1) # get the indices of the interpolated points in the occupancy map occupancy_idx = ( inter_points - np.array([self.cost_map.cfg.x_start, self.cost_map.cfg.y_start]) ) / self.cost_map.cfg.general.resolution # check occupancy for collisions at the interpolated points collision = occupancy_map[ occupancy_idx[:, 0].astype(np.int64), occupancy_idx[:, 1].astype(np.int64), ] collision = np.any(collision.reshape(-1, num_intermediate), axis=1) # get edge indices idx_edge_start = np.repeat(np.arange(odom_points.shape[0]), repeats=num_connections, axis=0) idx_edge_end = nearest_neighbors_idx.reshape(-1) # filter collision edges idx_edge_end = idx_edge_end[~collision] idx_edge_start = 
idx_edge_start[~collision] # init graph self.graph = nx.Graph() # add nodes with position attributes self.graph.add_nodes_from(list(range(odom_points.shape[0]))) pos_attr = {i: {"pos": odom_points[i]} for i in range(odom_points.shape[0])} nx.set_node_attributes(self.graph, pos_attr) # add edges with distance attributes self.graph.add_edges_from(list(map(tuple, np.stack((idx_edge_start, idx_edge_end), axis=1)))) distance_attr = { (i, j): {"distance": np.linalg.norm(odom_points[i] - odom_points[j])} for i, j in zip(idx_edge_start, idx_edge_end) } nx.set_edge_attributes(self.graph, distance_attr) # DEBUG if self.debug: import matplotlib.pyplot as plt nx.draw_networkx( self.graph, nx.get_node_attributes(self.graph, "pos"), node_size=10, with_labels=False, node_color=[0.0, 1.0, 0.0], ) plt.show() return def get_pairs(self): # iterate over all odom points and find goal points self.odom_no_suitable_goals = 0 self.odom_used = 0 # init semantic warp parameters if self.semantics or self.rgb: # compute pixel tensor depth_filename = self.depth_filename_list[0] depth_img = self._load_depth_image(depth_filename) x_nums, y_nums = depth_img.shape self.pix_depth_cam_frame = self.compute_pixel_tensor(x_nums, y_nums, self.K_depth) # make dir os.makedirs(os.path.join(self.root, "img_warp"), exist_ok=True) # get distances between odom and goal points odom_goal_distances = dict( nx.all_pairs_dijkstra_path_length( self.graph, cutoff=self._cfg.max_goal_distance, weight="distance", ) ) # init dataclass for each entry in the distance scheme self.category_scheme_pairs: Dict[float, DistanceSchemeIdx] = { distance: DistanceSchemeIdx(distance=distance) for distance in self._cfg.distance_scheme.keys() } # iterate over all odom points for odom_idx in tqdm(range(self.nb_odom_points), desc="Start-End Pairs Generation"): odom = self.odom_array_depth[odom_idx] # transform all odom points to current odom frame goals = pp.Inv(odom) @ self.odom_array_depth # categorize goals ( within_fov, front_of_robot, behind_robot, ) = self.get_goal_categories( goals ) # returns goals in odom frame # filter odom if no suitable goals within the fov are found if within_fov.sum() == 0: self.odom_no_suitable_goals += 1 continue self.odom_used += 1 if self.semantics or self.rgb: # semantic warp img_new_path = self._get_overlay_img(odom_idx) else: img_new_path = None # get pair according to distance scheme for each category self.reduce_pairs( odom_idx, goals, within_fov, odom_goal_distances[odom_idx], img_new_path, within_fov=True, ) self.reduce_pairs( odom_idx, goals, behind_robot, odom_goal_distances[odom_idx], img_new_path, behind_robot=True, ) self.reduce_pairs( odom_idx, goals, front_of_robot, odom_goal_distances[odom_idx], img_new_path, front_of_robot=True, ) # DEBUG if self.debug: # plot odom small_sphere = o3d.geometry.TriangleMesh.create_sphere( self.mesh_size / 3.0 ) # successful trajectory points odom_vis_list = [] goal_odom = odom @ goals hit_pcd = (goal_odom).cpu().numpy()[:, :3] for idx, pts in enumerate(hit_pcd): if within_fov[idx]: small_sphere.paint_uniform_color([0.4, 1.0, 0.1]) elif front_of_robot[idx]: small_sphere.paint_uniform_color([0.0, 0.5, 0.5]) else: small_sphere.paint_uniform_color([0.0, 0.1, 1.0]) odom_vis_list.append(copy.deepcopy(small_sphere).translate((pts[0], pts[1], pts[2]))) # viz cost map odom_vis_list.append(self.cost_map.pcd_tsdf) # field of view visualization fov_vis_length = 0.75 # length of the fov visualization plane in meters fov_vis_pt_right = odom @ pp.SE3( [ fov_vis_length * np.cos(self.alpha_fov / 
2), fov_vis_length * np.sin(self.alpha_fov / 2), 0, 0, 0, 0, 1, ] ) fov_vis_pt_left = odom @ pp.SE3( [ fov_vis_length * np.cos(self.alpha_fov / 2), -fov_vis_length * np.sin(self.alpha_fov / 2), 0, 0, 0, 0, 1, ] ) fov_vis_pt_right = fov_vis_pt_right.numpy()[:3] fov_vis_pt_left = fov_vis_pt_left.numpy()[:3] fov_mesh = o3d.geometry.TriangleMesh( vertices=o3d.utility.Vector3dVector( np.array( [ odom.data.cpu().numpy()[:3], fov_vis_pt_right, fov_vis_pt_left, ] ) ), triangles=o3d.utility.Vector3iVector(np.array([[2, 1, 0]])), ) fov_mesh.paint_uniform_color([1.0, 0.5, 0.0]) odom_vis_list.append(fov_mesh) # odom viz small_sphere.paint_uniform_color([1.0, 0.0, 0.0]) odom_vis_list.append( copy.deepcopy(small_sphere).translate( ( odom.data[0].item(), odom.data[1].item(), odom.data[2].item(), ) ) ) # plot goal o3d.visualization.draw_geometries(odom_vis_list) if self.debug: small_sphere = o3d.geometry.TriangleMesh.create_sphere(self.mesh_size / 3.0) # successful trajectory points odom_vis_list = [] for distance in self._cfg.distance_scheme.keys(): odoms = torch.vstack(self.category_scheme_pairs[distance].odom_list) odoms = odoms.tensor().cpu().numpy()[:, :3] for idx, odom in enumerate(odoms): odom_vis_list.append(copy.deepcopy(small_sphere).translate((odom[0], odom[1], odom[2]))) if idx > 10: break # viz cost map odom_vis_list.append(self.cost_map.pcd_tsdf) # plot goal o3d.visualization.draw_geometries(odom_vis_list) return def reduce_pairs( self, odom_idx: int, goals: pp.LieTensor, decision_tensor: torch.Tensor, odom_distances: dict, warp_img_path: Optional[str], within_fov: bool = False, behind_robot: bool = False, front_of_robot: bool = False, ): # remove all goals depending on the decision tensor from the odom_distances dict keep_distance_entries = decision_tensor[list(odom_distances.keys())] distances = np.array(list(odom_distances.values()))[keep_distance_entries.numpy()] goal_idx = np.array(list(odom_distances.keys()))[keep_distance_entries.numpy()] # max distance enforced odom_distances, here enforce min distance within_distance_idx = distances > self._cfg.min_goal_distance goal_idx = goal_idx[within_distance_idx] distances = distances[within_distance_idx] # check if there are any goals left if len(goal_idx) == 0: return # select the goal according to the distance_scheme for distance in self._cfg.distance_scheme.keys(): # select nbr_samples from goals within distance within_curr_distance_idx = distances < distance if sum(within_curr_distance_idx) == 0: continue selected_idx = np.random.choice( goal_idx[within_curr_distance_idx], min(3, sum(within_curr_distance_idx)), replace=False, ) # remove the selected goals from the list for further selection distances = distances[~within_curr_distance_idx] goal_idx = goal_idx[~within_curr_distance_idx] for idx in selected_idx: self.category_scheme_pairs[distance].update_buffers( odom=self.odom_array_depth[odom_idx], goal=goals[idx], within_fov=within_fov, front_of_robot=front_of_robot, behind_robot=behind_robot, depth_filename=self.depth_filename_list[odom_idx], sem_rgb_filename=warp_img_path, ) def get_goal_categories(self, goal_odom_frame: pp.LieTensor): """ Decide which of the samples are within the fov, in front of the robot or behind the robot. 
""" # get if odom-goal is within fov or outside the fov but still in front of the robot goal_angle = abs(torch.atan2(goal_odom_frame.data[:, 1], goal_odom_frame.data[:, 0])) within_fov = goal_angle < self.alpha_fov / 2 * self._cfg.fov_scale front_of_robot = goal_angle < torch.pi / 2 front_of_robot[within_fov] = False behind_robot = ~front_of_robot.clone() behind_robot[within_fov] = False return within_fov, front_of_robot, behind_robot """SPLIT HELPER FUNCTIONS""" def split_samples( self, test_dataset: PlannerData, train_dataset: Optional[PlannerData] = None, generate_split: bool = False, ratio_fov_samples: Optional[float] = None, ratio_front_samples: Optional[float] = None, ratio_back_samples: Optional[float] = None, allow_augmentation: bool = True, ) -> None: # check if ratios are given or defaults are used ratio_fov_samples = ratio_fov_samples if ratio_fov_samples is not None else self._cfg.ratio_fov_samples ratio_front_samples = ratio_front_samples if ratio_front_samples is not None else self._cfg.ratio_front_samples ratio_back_samples = ratio_back_samples if ratio_back_samples is not None else self._cfg.ratio_back_samples assert round(ratio_fov_samples + ratio_front_samples + ratio_back_samples, 2) == 1.0, ( "Sample ratios must sum up to 1.0, currently" f" {ratio_back_samples + ratio_front_samples + ratio_fov_samples}" ) # max sample number if self._cfg.max_train_pairs: max_sample_number = min( int(self._cfg.max_train_pairs / self._cfg.ratio), int(self.odom_used * self._cfg.pairs_per_image), ) else: max_sample_number = int(self.odom_used * self._cfg.pairs_per_image) # init buffers odom = torch.zeros((max_sample_number, 7), dtype=torch.float32) goal = torch.zeros((max_sample_number, 7), dtype=torch.float32) augment_samples = np.zeros((max_sample_number), dtype=bool) depth_filename = [] sem_rgb_filename = [] current_idx = 0 for distance, distance_percentage in self._cfg.distance_scheme.items(): if not self.category_scheme_pairs[distance].has_data: print(f"[WARN] No samples for distance {distance} in ENV" f" {os.path.split(self.root)[-1]}") continue # get number of samples buffer_data = self.category_scheme_pairs[distance].get_data( nb_fov=int(ratio_fov_samples * distance_percentage * max_sample_number), nb_front=int(ratio_front_samples * distance_percentage * max_sample_number), nb_back=int(ratio_back_samples * distance_percentage * max_sample_number), augment=allow_augmentation, ) nb_samples = buffer_data[0].shape[0] # add to buffers odom[current_idx : current_idx + nb_samples] = buffer_data[0] goal[current_idx : current_idx + nb_samples] = buffer_data[1] depth_filename += buffer_data[2] sem_rgb_filename += buffer_data[3] augment_samples[current_idx : current_idx + nb_samples] = buffer_data[4] current_idx += nb_samples # cut off unused space odom = odom[:current_idx] goal = goal[:current_idx] augment_samples = augment_samples[:current_idx] # print data mix print( f"[INFO] datamix containing {odom.shape[0]} suitable odom-goal" " pairs: \n" "\t fov :" f" \t{int(odom.shape[0] * ratio_fov_samples) } ({round(ratio_fov_samples*100, 2)} %) \n" "\t front of robot :" f" \t{int(odom.shape[0] * ratio_front_samples)} ({round(ratio_front_samples*100, 2)} %) \n" "\t back of robot :" f" \t{int(odom.shape[0] * ratio_back_samples) } ({round(ratio_back_samples*100, 2)} %) \n" "from" f" {self.odom_used} ({round(self.odom_used/self.nb_odom_points*100, 2)} %)" " different starting points where \n" "\t non-suitable filter:" f" {self.odom_no_suitable_goals} 
({round(self.odom_no_suitable_goals/self.nb_odom_points*100, 2)} %)" ) # generate split idx = np.arange(odom.shape[0]) if generate_split: train_index = sample(idx.tolist(), int(len(idx) * self._cfg.ratio)) idx = np.delete(idx, train_index) train_dataset.update_buffers( depth_filename=[depth_filename[i] for i in train_index], sem_rgb_filename=([sem_rgb_filename[i] for i in train_index] if (self.semantics or self.rgb) else None), odom=odom[train_index], goal=goal[train_index], pair_augment=augment_samples[train_index], ) train_dataset.set_fov(self.alpha_fov) test_dataset.update_buffers( depth_filename=[depth_filename[i] for i in idx], sem_rgb_filename=([sem_rgb_filename[i] for i in idx] if (self.semantics or self.rgb) else None), odom=odom[idx], goal=goal[idx], pair_augment=augment_samples[idx], ) test_dataset.set_fov(self.alpha_fov) return """ Warp semantic on depth image helper functions""" @staticmethod def compute_pixel_tensor(x_nums: int, y_nums: int, K_depth: np.ndarray) -> None: # get image plane mesh grid pix_u = np.arange(0, y_nums) pix_v = np.arange(0, x_nums) grid = np.meshgrid(pix_u, pix_v) pixels = np.vstack(list(map(np.ravel, grid))).T pixels = np.hstack([pixels, np.ones((len(pixels), 1))]) # add ones for 3D coordinates # transform to camera frame k_inv = np.linalg.inv(K_depth) pix_cam_frame = np.matmul(k_inv, pixels.T) # reorder to be in "robotics" axis order (x forward, y left, z up) return pix_cam_frame[[2, 0, 1], :].T * np.array([1, -1, -1]) def _load_depth_image(self, depth_filename): if depth_filename.endswith(".png"): depth_image = Image.open(depth_filename) if self._cfg.real_world_data: depth_image = np.array(depth_image.transpose(PIL.Image.ROTATE_180)) else: depth_image = np.array(depth_image) else: depth_image = np.load(depth_filename) depth_image[~np.isfinite(depth_image)] = 0.0 depth_image = (depth_image / self._cfg.depth_scale).astype("float32") depth_image[depth_image > self._cfg.max_depth] = 0.0 return depth_image @staticmethod def compute_overlay( pose_dep, pose_sem, depth_img, sem_rgb_image, pix_depth_cam_frame, K_sem_rgb, ): # get 3D points of depth image rot = tf.Rotation.from_quat(pose_dep[3:]).as_matrix() dep_im_reshaped = depth_img.reshape( -1, 1 ) # flip s.t. 
start in lower left corner of image as (0,0) -> has to fit to the pixel tensor points = dep_im_reshaped * (rot @ pix_depth_cam_frame.T).T + pose_dep[:3] # transform points to semantic camera frame points_sem_cam_frame = (tf.Rotation.from_quat(pose_sem[3:]).as_matrix().T @ (points - pose_sem[:3]).T).T # normalize points points_sem_cam_frame_norm = points_sem_cam_frame / points_sem_cam_frame[:, 0][:, np.newaxis] # reorder points be camera convention (z-forward) points_sem_cam_frame_norm = points_sem_cam_frame_norm[:, [1, 2, 0]] * np.array([-1, -1, 1]) # transform points to pixel coordinates pixels = (K_sem_rgb @ points_sem_cam_frame_norm.T).T # filter points outside of image filter_idx = ( (pixels[:, 0] >= 0) & (pixels[:, 0] < sem_rgb_image.shape[1]) & (pixels[:, 1] >= 0) & (pixels[:, 1] < sem_rgb_image.shape[0]) ) # get semantic annotation sem_annotation = np.zeros((pixels.shape[0], 3), dtype=np.uint8) sem_annotation[filter_idx] = sem_rgb_image[ pixels[filter_idx, 1].astype(int), pixels[filter_idx, 0].astype(int), ] # reshape to image return sem_annotation.reshape(depth_img.shape[0], depth_img.shape[1], 3) def _get_overlay_img(self, odom_idx): # get corresponding filenames depth_filename = self.depth_filename_list[odom_idx] sem_rgb_filename = self.sem_rgb_filename_list[odom_idx] # load semantic and depth image and get their poses depth_img = self._load_depth_image(depth_filename) sem_rgb_image = Image.open(sem_rgb_filename) if self._cfg.real_world_data: sem_rgb_image = np.array(sem_rgb_image.transpose(PIL.Image.ROTATE_180)) else: sem_rgb_image = np.array(sem_rgb_image) pose_dep = self.odom_array_depth[odom_idx].data.cpu().numpy() pose_sem = self.odom_array_sem_rgb[odom_idx].data.cpu().numpy() sem_rgb_image_warped = self.compute_overlay( pose_dep, pose_sem, depth_img, sem_rgb_image, self.pix_depth_cam_frame, self.K_sem_rgb, ) assert sem_rgb_image_warped.dtype == np.uint8, "sem_rgb_image_warped has to be uint8" # DEBUG if self.debug: import matplotlib.pyplot as plt f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5)) ax1.imshow(depth_img) ax2.imshow(sem_rgb_image_warped / 255) ax3.imshow(sem_rgb_image) # ax3.imshow(depth_img) # ax3.imshow(sem_rgb_image_warped / 255, alpha=0.5) ax1.axis("off") ax2.axis("off") ax3.axis("off") plt.show() # save semantic image under the new path sem_rgb_filename = os.path.split(sem_rgb_filename)[1] sem_rgb_image_path = os.path.join(self.root, "img_warp", sem_rgb_filename) sem_rgb_image_warped = cv2.cvtColor(sem_rgb_image_warped, cv2.COLOR_RGB2BGR) # convert to BGR for cv2 assert cv2.imwrite(sem_rgb_image_path, sem_rgb_image_warped) return sem_rgb_image_path """Noise Edges helper functions""" def noise_edges(self): """ Along the edges in the depth image, set the values to 0. Mimics the real-world behavior where RealSense depth cameras have difficulties along edges. 
""" print("[INFO] Adding noise to edges in depth images ...", end=" ") new_depth_filename_list = [] # create new directory depth_noise_edge_dir = os.path.join(self.root, "depth_noise_edges") os.makedirs(depth_noise_edge_dir, exist_ok=True) for depth_filename in self.depth_filename_list: depth_img = self._load_depth_image(depth_filename) # Perform Canny edge detection image = ((depth_img / depth_img.max()) * 255).astype(np.uint8) # convert to CV_U8 format edges = cv2.Canny(image, self._cfg.edge_threshold, self._cfg.edge_threshold * 3) # Dilate the edges to extend their space kernel = np.ones(self._cfg.extend_kernel_size, np.uint8) dilated_edges = cv2.dilate(edges, kernel, iterations=1) # Erode the edges to refine their shape eroded_edges = cv2.erode(dilated_edges, kernel, iterations=1) # modify depth image depth_img[eroded_edges == 255] = 0.0 # save depth image depth_img = (depth_img * self._cfg.depth_scale).astype("uint16") if depth_filename.endswith(".png"): assert cv2.imwrite( os.path.join(depth_noise_edge_dir, os.path.split(depth_filename)[1]), depth_img, ) else: np.save( os.path.join(depth_noise_edge_dir, os.path.split(depth_filename)[1]), depth_img, ) new_depth_filename_list.append(os.path.join(depth_noise_edge_dir, os.path.split(depth_filename)[1])) self.depth_filename_list = new_depth_filename_list print("Done!") return """ Cleanup Script for files generated by this class""" def cleanup(self): print( ("[INFO] Cleaning up for environment" f" {os.path.split(self.root)[1]} ..."), end=" ", ) # remove semantic_warp directory if os.path.isdir(os.path.join(self.root, "img_warp")): shutil.rmtree(os.path.join(self.root, "img_warp")) # remove depth_noise_edges directory if os.path.isdir(os.path.join(self.root, "depth_noise_edges")): shutil.rmtree(os.path.join(self.root, "depth_noise_edges")) print("Done!") return # EoF
51,127
Python
38.481081
135
0.542707
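The `noise_edges` routine in the file above follows a common OpenCV pattern: detect depth discontinuities with Canny, widen them with a morphological dilation (followed by an erosion), and zero the depth values under the resulting mask. Below is a minimal stand-alone sketch of that pattern; the synthetic depth image, the 10/30 Canny thresholds and the 5x5 kernel are illustrative assumptions, not values taken from the repository's config.

```python
import cv2
import numpy as np

# Synthetic depth image [m]: a near block in front of a far background,
# so Canny has a sharp depth discontinuity to find (illustrative only).
depth = np.full((120, 160), 4.0, dtype=np.float32)
depth[40:80, 60:100] = 1.0

# Normalize to uint8 so Canny can operate on it.
img_u8 = ((depth / depth.max()) * 255).astype(np.uint8)

# Edge detection with an assumed low/high threshold pair.
edges = cv2.Canny(img_u8, 10, 30)

# Dilate then erode with the same kernel to widen the edge band slightly.
kernel = np.ones((5, 5), np.uint8)
mask = cv2.erode(cv2.dilate(edges, kernel, iterations=1), kernel, iterations=1)

# Invalidate depth along the edges, mimicking RealSense dropouts.
depth_noisy = depth.copy()
depth_noisy[mask == 255] = 0.0

print("pixels zeroed:", int((depth_noisy == 0.0).sum()))
```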
leggedrobotics/viplanner/viplanner/utils/torchutil.py
# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab) # Author: Pascal Roth # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import torch import torch.fft class EarlyStopScheduler(torch.optim.lr_scheduler.ReduceLROnPlateau): def __init__( self, optimizer, mode="min", factor=0.1, patience=10, verbose=False, threshold=1e-4, threshold_mode="rel", cooldown=0, min_lr=0, eps=1e-8, ): super().__init__( optimizer=optimizer, mode=mode, factor=factor, patience=patience, threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr, eps=eps, verbose=verbose, ) self.no_decrease = 0 def step(self, metrics, epoch=None): # convert `metrics` to float, in case it's a zero-dim Tensor current = float(metrics) if epoch is None: epoch = self.last_epoch = self.last_epoch + 1 self.last_epoch = epoch if self.is_better(current, self.best): self.best = current self.num_bad_epochs = 0 else: self.num_bad_epochs += 1 if self.in_cooldown: self.cooldown_counter -= 1 self.num_bad_epochs = 0 # ignore any bad epochs in cooldown if self.num_bad_epochs > self.patience: self.cooldown_counter = self.cooldown self.num_bad_epochs = 0 self._reduce_lr(epoch) def _reduce_lr(self, epoch): for i, param_group in enumerate(self.optimizer.param_groups): old_lr = float(param_group["lr"]) new_lr = max(old_lr * self.factor, self.min_lrs[i]) if old_lr - new_lr > self.eps: param_group["lr"] = new_lr if self.verbose: print("Epoch {:5d}: reducing learning rate" " of group {} to {:.4e}.".format(epoch, i, new_lr)) return False else: return True def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad)
2,233
Python
28.394736
115
0.543215
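`EarlyStopScheduler` couples ReduceLROnPlateau-style decay with an early-stopping signal: once the learning rate has reached `min_lr` and cannot be lowered further, the caller is expected to stop training. The following stand-alone sketch illustrates that policy without importing the class; the toy model, data and hyper-parameters are invented for illustration.

```python
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 1)
optimizer = optim.SGD(model.parameters(), lr=0.1)
x, y = torch.randn(64, 4), torch.randn(64, 1)

factor, patience, min_lr, eps = 0.5, 2, 1e-3, 1e-8
best, bad_epochs = float("inf"), 0

for epoch in range(200):
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()

    # Plateau bookkeeping, mirroring ReduceLROnPlateau.
    if loss.item() < best - 1e-4:
        best, bad_epochs = loss.item(), 0
    else:
        bad_epochs += 1

    if bad_epochs > patience:
        bad_epochs = 0
        group = optimizer.param_groups[0]
        new_lr = max(group["lr"] * factor, min_lr)
        if group["lr"] - new_lr > eps:
            group["lr"] = new_lr  # still room to decay -> keep training
        else:
            print(f"early stop at epoch {epoch}")  # LR floor reached -> stop
            break
```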
leggedrobotics/viplanner/viplanner/utils/trainer.py
# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab) # Author: Pascal Roth # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import contextlib # python import os from typing import List, Optional, Tuple import matplotlib.pyplot as plt import numpy as np import torch import torch.nn as nn import torch.optim as optim import torch.utils.data as Data import torchvision.transforms as transforms import tqdm import wandb # logging import yaml # imperative-planning-learning from viplanner.config import TrainCfg from viplanner.plannernet import ( PRE_TRAIN_POSSIBLE, AutoEncoder, DualAutoEncoder, get_m2f_cfg, ) from viplanner.traj_cost_opt import TrajCost, TrajViz from viplanner.utils.torchutil import EarlyStopScheduler, count_parameters from .dataset import PlannerData, PlannerDataGenerator torch.set_default_dtype(torch.float32) class Trainer: """ VIPlanner Trainer """ def __init__(self, cfg: TrainCfg) -> None: self._cfg = cfg # set model save/load path os.makedirs(self._cfg.curr_model_dir, exist_ok=True) self.model_path = os.path.join(self._cfg.curr_model_dir, "model.pt") if self._cfg.hierarchical: self.model_dir_hierarch = os.path.join(self._cfg.curr_model_dir, "hierarchical") os.makedirs(self.model_dir_hierarch, exist_ok=True) self.hierach_losses = {} # image transforms self.transform = transforms.Compose( [ transforms.ToTensor(), transforms.Resize((self._cfg.img_input_size), antialias=True), ] ) # init buffers DATA self.data_generators: List[PlannerDataGenerator] = [] self.data_traj_cost: List[TrajCost] = [] self.data_traj_viz: List[TrajViz] = [] self.fov_ratio: float = None self.front_ratio: float = None self.back_ratio: float = None self.pixel_mean: np.ndarray = None self.pixel_std: np.ndarray = None # inti buffers MODEL self.best_loss = float("inf") self.test_loss = float("inf") self.net: nn.Module = None self.optimizer: optim.Optimizer = None self.scheduler: EarlyStopScheduler = None print("[INFO] Trainer initialized") return """PUBLIC METHODS""" def train(self) -> None: print("[INFO] Start Training") # init logging self._init_logging() # load model and prepare model for training self._load_model(self._cfg.resume) self._configure_optimizer() # get dataloader for training self._load_data(train=True) if self._cfg.hierarchical: step_counter = 0 train_loader_list, val_loader_list = self._get_dataloader(step=step_counter) else: train_loader_list, val_loader_list = self._get_dataloader() try: wandb.watch(self.net) except: # noqa: E722 print("[WARNING] Wandb model watch failed") for epoch in range(self._cfg.epochs): train_loss = 0 val_loss = 0 for i in range(len(train_loader_list)): train_loss += self._train_epoch(train_loader_list[i], epoch, env_id=i) val_loss += self._test_epoch(val_loader_list[i], env_id=i, epoch=epoch) train_loss /= len(train_loader_list) val_loss /= len(train_loader_list) try: wandb.log( { "train_loss": train_loss, "val_loss": val_loss, "epoch": epoch, } ) except: # noqa: E722 print("[WARNING] Wandb logging failed") # if val_loss < best_loss: if val_loss < self.best_loss: print("[INFO] Save model of epoch %d" % (epoch)) torch.save((self.net.state_dict(), val_loss), self.model_path) self.best_loss = val_loss print("[INFO] Current val loss: %.4f" % (self.best_loss)) if self.scheduler.step(val_loss): print("[INFO] Early Stopping!") break if self._cfg.hierarchical and (epoch + 1) % self._cfg.hierarchical_step == 0: torch.save( (self.net.state_dict(), self.best_loss), os.path.join( self.model_dir_hierarch, ( f"model_ep{epoch}_fov{round(self.fov_ratio, 3)}_" 
f"front{round(self.front_ratio, 3)}_" f"back{round(self.back_ratio, 3)}.pt" ), ), ) step_counter += 1 train_loader_list, val_loader_list = self._get_dataloader(step=step_counter) self.hierach_losses[epoch] = self.best_loss torch.cuda.empty_cache() # cleanup data for generator in self.data_generators: generator.cleanup() # empty buffers self.data_generators = [] self.data_traj_cost = [] self.data_traj_viz = [] return def test(self, step: Optional[int] = None) -> None: print("[INFO] Start Training") # set random seed for reproducibility torch.manual_seed(self._cfg.seed) # define step if step is None and self._cfg.hierarchical: step = int(self._cfg.epochs / self._cfg.hierarchical_step) # load model self._load_model(resume=True) # get dataloader for training self._load_data(train=False) _, test_loader = self._get_dataloader(train=False, step=step) self.test_loss = self._test_epoch( test_loader[0], env_id=0, is_visual=not os.getenv("EXPERIMENT_DIRECTORY"), fov_angle=self.data_generators[0].alpha_fov, dataset="test", ) # cleanup data for generator in self.data_generators: generator.cleanup() def save_config(self) -> None: print(f"[INFO] val_loss: {self.best_loss:.2f}, test_loss," f"{self.test_loss:.4f}") """ Save config and loss to file""" path, _ = os.path.splitext(self.model_path) yaml_path = path + ".yaml" print(f"[INFO] Save config and loss to {yaml_path} file") loss_dict = {"val_loss": self.best_loss, "test_loss": self.test_loss} save_dict = {"config": vars(self._cfg), "loss": loss_dict} # dump yaml with open(yaml_path, "w+") as file: yaml.dump(save_dict, file, allow_unicode=True, default_flow_style=False) # logging with contextlib.suppress(Exception): wandb.finish() # plot hierarchical losses if self._cfg.hierarchical: plt.figure(figsize=(10, 10)) plt.plot( list(self.hierach_losses.keys()), list(self.hierach_losses.values()), ) plt.xlabel("Epoch") plt.ylabel("Validation Loss") plt.title("Hierarchical Losses") plt.savefig(os.path.join(self.model_dir_hierarch, "hierarchical_losses.png")) plt.close() return """PRIVATE METHODS""" # Helper function DATA def _load_data(self, train: bool = True) -> None: if not isinstance(self._cfg.data_cfg, list): self._cfg.data_cfg = [self._cfg.data_cfg] * len(self._cfg.env_list) assert len(self._cfg.data_cfg) == len(self._cfg.env_list), ( "Either single DataCfg or number matching number of environments" "must be provided" ) for idx, env_name in enumerate(self._cfg.env_list): if (train and idx == self._cfg.test_env_id) or (not train and idx != self._cfg.test_env_id): continue data_path = os.path.join(self._cfg.data_dir, env_name) # get trajectory cost map traj_cost = TrajCost( self._cfg.gpu_id, log_data=train, w_obs=self._cfg.w_obs, w_height=self._cfg.w_height, w_goal=self._cfg.w_goal, w_motion=self._cfg.w_motion, obstalce_thread=self._cfg.obstacle_thread, ) traj_cost.SetMap( data_path, self._cfg.cost_map_name, ) generator = PlannerDataGenerator( cfg=self._cfg.data_cfg[idx], root=data_path, semantics=self._cfg.sem, rgb=self._cfg.rgb, cost_map=traj_cost.cost_map, # trajectory cost class ) traj_viz = TrajViz( intrinsics=generator.K_depth, cam_resolution=self._cfg.img_input_size, camera_tilt=self._cfg.camera_tilt, cost_map=traj_cost.cost_map, ) self.data_generators.append(generator) self.data_traj_cost.append(traj_cost) self.data_traj_viz.append(traj_viz) print(f"LOADED DATA FOR ENVIRONMENT: {env_name}") print("[INFO] LOADED ALL DATA") return # Helper function TRAINING def _init_logging(self) -> None: # logging os.environ["WANDB_API_KEY"] = self._cfg.wb_api_key 
os.environ["WANDB_MODE"] = "online" os.makedirs(self._cfg.log_dir, exist_ok=True) try: wandb.init( project=self._cfg.wb_project, entity=self._cfg.wb_entity, name=self._cfg.get_model_save(), config=self._cfg.__dict__, dir=self._cfg.log_dir, ) except: # noqa: E722 print("[WARNING: Wandb not available") return def _load_model(self, resume: bool = False) -> None: if self._cfg.sem or self._cfg.rgb: if self._cfg.rgb and self._cfg.pre_train_sem: assert PRE_TRAIN_POSSIBLE, ( "Pretrained model not available since either detectron2" " not installed or mask2former not found in thrid_party" " folder" ) pre_train_cfg = os.path.join(self._cfg.all_model_dir, self._cfg.pre_train_cfg) pre_train_weights = ( os.path.join(self._cfg.all_model_dir, self._cfg.pre_train_weights) if self._cfg.pre_train_weights else None ) m2f_cfg = get_m2f_cfg(pre_train_cfg) self.pixel_mean = m2f_cfg.MODEL.PIXEL_MEAN self.pixel_std = m2f_cfg.MODEL.PIXEL_STD else: m2f_cfg = None pre_train_weights = None self.net = DualAutoEncoder(self._cfg, m2f_cfg=m2f_cfg, weight_path=pre_train_weights) else: self.net = AutoEncoder(self._cfg.in_channel, self._cfg.knodes) assert torch.cuda.is_available(), "Code requires GPU" print(f"Available GPU list: {list(range(torch.cuda.device_count()))}") print(f"Running on GPU: {self._cfg.gpu_id}") self.net = self.net.cuda(self._cfg.gpu_id) print(f"[INFO] MODEL LOADED ({count_parameters(self.net)} parameters)") if resume: model_state_dict, self.best_loss = torch.load(self.model_path) self.net.load_state_dict(model_state_dict) print(f"Resume train from {self.model_path} with loss " f"{self.best_loss}") return def _configure_optimizer(self) -> None: if self._cfg.optimizer == "adam": self.optimizer = optim.Adam( self.net.parameters(), lr=self._cfg.lr, weight_decay=self._cfg.w_decay, ) elif self._cfg.optimizer == "sgd": self.optimizer = optim.SGD( self.net.parameters(), lr=self._cfg.lr, momentum=self._cfg.momentum, weight_decay=self._cfg.w_decay, ) else: raise KeyError(f"Optimizer {self._cfg.optimizer} not supported") self.scheduler = EarlyStopScheduler( self.optimizer, factor=self._cfg.factor, verbose=True, min_lr=self._cfg.min_lr, patience=self._cfg.patience, ) print("[INFO] OPTIMIZER AND SCHEDULER CONFIGURED") return def _get_dataloader( self, train: bool = True, step: Optional[int] = None, allow_augmentation: bool = True, ) -> None: train_loader_list: List[Data.DataLoader] = [] val_loader_list: List[Data.DataLoader] = [] if step is not None: self.fov_ratio = ( 1.0 - (self._cfg.hierarchical_front_step_ratio + self._cfg.hierarchical_back_step_ratio) * step ) self.front_ratio = self._cfg.hierarchical_front_step_ratio * step self.back_ratio = self._cfg.hierarchical_back_step_ratio * step for generator in self.data_generators: # init data classes val_data = PlannerData( cfg=generator._cfg, transform=self.transform, semantics=self._cfg.sem, rgb=self._cfg.rgb, pixel_mean=self.pixel_mean, pixel_std=self.pixel_std, ) if train: train_data = PlannerData( cfg=generator._cfg, transform=self.transform, semantics=self._cfg.sem, rgb=self._cfg.rgb, pixel_mean=self.pixel_mean, pixel_std=self.pixel_std, ) else: train_data = None # split data in train and validation with given sample ratios if train: generator.split_samples( train_dataset=train_data, test_dataset=val_data, generate_split=train, ratio_back_samples=self.back_ratio, ratio_front_samples=self.front_ratio, ratio_fov_samples=self.fov_ratio, allow_augmentation=allow_augmentation, ) else: generator.split_samples( train_dataset=train_data, test_dataset=val_data, 
generate_split=train, ratio_back_samples=self.back_ratio, ratio_front_samples=self.front_ratio, ratio_fov_samples=self.fov_ratio, allow_augmentation=allow_augmentation, ) if self._cfg.load_in_ram: if train: train_data.load_data_in_memory() val_data.load_data_in_memory() if train: train_loader = Data.DataLoader( dataset=train_data, batch_size=self._cfg.batch_size, shuffle=True, pin_memory=True, num_workers=self._cfg.num_workers, ) val_loader = Data.DataLoader( dataset=val_data, batch_size=self._cfg.batch_size, shuffle=True, pin_memory=True, num_workers=self._cfg.num_workers, ) if train: train_loader_list.append(train_loader) val_loader_list.append(val_loader) return train_loader_list, val_loader_list def _train_epoch( self, loader: Data.DataLoader, epoch: int, env_id: int, ) -> float: train_loss, batches = 0, len(loader) enumerater = tqdm.tqdm(enumerate(loader)) for batch_idx, inputs in enumerater: odom = inputs[2].cuda(self._cfg.gpu_id) goal = inputs[3].cuda(self._cfg.gpu_id) self.optimizer.zero_grad() if self._cfg.sem or self._cfg.rgb: depth_image = inputs[0].cuda(self._cfg.gpu_id) sem_rgb_image = inputs[1].cuda(self._cfg.gpu_id) preds, fear = self.net(depth_image, sem_rgb_image, goal) else: image = inputs[0].cuda(self._cfg.gpu_id) preds, fear = self.net(image, goal) # flip y axis for augmented samples (clone necessary due to # inplace operation that otherwise leads to error in backprop) preds_flip = torch.clone(preds) preds_flip[inputs[4], :, 1] = preds_flip[inputs[4], :, 1] * -1 goal_flip = torch.clone(goal) goal_flip[inputs[4], 1] = goal_flip[inputs[4], 1] * -1 log_step = batch_idx + epoch * batches loss, _ = self._loss( preds_flip, fear, self.data_traj_cost[env_id], odom, goal_flip, log_step=log_step, ) wandb.log({"train_loss_step": loss}, step=log_step) loss.backward() self.optimizer.step() train_loss += loss.item() enumerater.set_description( f"Epoch: {epoch} in Env: " f"({env_id+1}/{len(self._cfg.env_list)-1}) " f"- train loss:{round(train_loss/(batch_idx+1), 4)} on" f" {batch_idx}/{batches}" ) return train_loss / (batch_idx + 1) def _test_epoch( self, loader, env_id: int, epoch: int = 0, is_visual=False, fov_angle: float = 90.0, dataset: str = "val", ) -> float: test_loss = 0 num_batches = len(loader) preds_viz = [] wp_viz = [] image_viz = [] with torch.no_grad(): for batch_idx, inputs in enumerate(loader): odom = inputs[2].cuda(self._cfg.gpu_id) goal = inputs[3].cuda(self._cfg.gpu_id) if self._cfg.sem or self._cfg.rgb: image = inputs[0].cuda(self._cfg.gpu_id) # depth sem_rgb_image = inputs[1].cuda(self._cfg.gpu_id) # sem preds, fear = self.net(image, sem_rgb_image, goal) else: image = inputs[0].cuda(self._cfg.gpu_id) preds, fear = self.net(image, goal) # flip y axis for augmented samples preds[inputs[4], :, 1] = preds[inputs[4], :, 1] * -1 goal[inputs[4], 1] = goal[inputs[4], 1] * -1 log_step = epoch * num_batches + batch_idx loss, waypoints = self._loss( preds, fear, self.data_traj_cost[env_id], odom, goal, log_step=log_step, dataset=dataset, ) if dataset == "val": wandb.log({f"{dataset}_loss_step": loss}, step=log_step) test_loss += loss.item() if is_visual and len(preds_viz) * batch_idx < self._cfg.n_visualize: if batch_idx == 0: odom_viz = odom.cpu() goal_viz = goal.cpu() fear_viz = fear.cpu() augment_viz = inputs[4].cpu() else: odom_viz = torch.cat((odom_viz, odom.cpu()), dim=0) goal_viz = torch.cat((goal_viz, goal.cpu()), dim=0) fear_viz = torch.cat((fear_viz, fear.cpu()), dim=0) augment_viz = torch.cat((augment_viz, inputs[4].cpu()), dim=0) preds_viz.append(preds.cpu()) 
wp_viz.append(waypoints.cpu()) image_viz.append(image.cpu()) if is_visual: preds_viz = torch.vstack(preds_viz) wp_viz = torch.vstack(wp_viz) image_viz = torch.vstack(image_viz) # limit again to number of visualizations since before # added as multiple of batch size preds_viz = preds_viz[: self._cfg.n_visualize] wp_viz = wp_viz[: self._cfg.n_visualize] image_viz = image_viz[: self._cfg.n_visualize] odom_viz = odom_viz[: self._cfg.n_visualize] goal_viz = goal_viz[: self._cfg.n_visualize] fear_viz = fear_viz[: self._cfg.n_visualize] augment_viz = augment_viz[: self._cfg.n_visualize] # visual trajectory and images self.data_traj_viz[env_id].VizTrajectory( preds_viz, wp_viz, odom_viz, goal_viz, fear_viz, fov_angle=fov_angle, augment_viz=augment_viz, ) self.data_traj_viz[env_id].VizImages(preds_viz, wp_viz, odom_viz, goal_viz, fear_viz, image_viz) return test_loss / (batch_idx + 1) def _loss( self, preds: torch.Tensor, fear: torch.Tensor, traj_cost: TrajCost, odom: torch.Tensor, goal: torch.Tensor, log_step: int, step: float = 0.1, dataset: str = "train", ) -> Tuple[torch.Tensor, torch.Tensor]: waypoints = traj_cost.opt.TrajGeneratorFromPFreeRot(preds, step=step) loss = traj_cost.CostofTraj( waypoints, odom, goal, fear, log_step, ahead_dist=self._cfg.fear_ahead_dist, dataset=dataset, ) return loss, waypoints # EoF
22,112
Python
34.608696
112
0.510899
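The `Trainer` above is driven entirely by a `TrainCfg` object; a typical session builds the config, trains on all environments except the held-out one, evaluates on the test environment, and writes the config plus losses next to the saved model. A hedged sketch of that flow follows; the field values are placeholders, and a real run additionally needs the data directories, model paths and wandb settings that `TrainCfg` configures.

```python
# Hypothetical driver script; assumes the viplanner package is installed and
# that TrainCfg provides defaults for the remaining fields (paths, wandb, ...).
from viplanner.config import TrainCfg
from viplanner.utils.trainer import Trainer

cfg = TrainCfg()                      # start from the library defaults
cfg.env_list = ["town01", "town02"]   # placeholder environment names
cfg.test_env_id = 1                   # last env is held out for testing
cfg.epochs = 50                       # illustrative value

trainer = Trainer(cfg)
trainer.train()        # fit on all envs except the test env
trainer.test()         # evaluate the best checkpoint on the test env
trainer.save_config()  # dump config + val/test losses next to model.pt
```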
leggedrobotics/viplanner/viplanner/utils/eval_utils.py
# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab) # Author: Pascal Roth # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # python import os from typing import List, Optional, Union import matplotlib.pyplot as plt import numpy as np import torch import yaml # viplanner from viplanner.config.learning_cfg import Loader as TrainCfgLoader from viplanner.traj_cost_opt import TrajCost class BaseEvaluator: def __init__( self, distance_tolerance: float, obs_loss_threshold: float, cost_map_dir: Optional[str] = None, cost_map_name: Optional[str] = None, ) -> None: # args self.distance_tolerance = distance_tolerance self.obs_loss_threshold = obs_loss_threshold self.cost_map_dir = cost_map_dir self.cost_map_name = cost_map_name # parameters self._nbr_paths: int = 0 # load cost_map self._use_cost_map: bool = False if all([self.cost_map_dir, self.cost_map_name]): self._load_cost_map() return ## # Properties ## @property def nbr_paths(self) -> int: return self._nbr_paths def set_nbr_paths(self, nbr_paths: int) -> None: self._nbr_paths = nbr_paths return ## # Buffer ## def create_buffers(self) -> None: self.length_goal: np.ndarray = np.zeros(self._nbr_paths) self.length_path: np.ndarray = np.zeros(self._nbr_paths) self.path_extension: np.ndarray = np.zeros(self._nbr_paths) self.goal_distances: np.ndarray = np.zeros(self._nbr_paths) if self._use_cost_map: self.loss_obstacles: np.ndarray = np.zeros(self._nbr_paths) ## # Reset ## def reset(self) -> None: self.create_buffers() self.eval_stats = {} return ## # Cost Map ## def _load_cost_map(self) -> None: self._traj_cost: TrajCost = TrajCost(gpu_id=None) # use cpu for evaluation self._traj_cost.SetMap(self.cost_map_dir, self.cost_map_name) self._use_cost_map = True return def _get_cost_map_loss(self, path: Union[torch.Tensor, np.ndarray]) -> float: if isinstance(path, np.ndarray): waypoints = torch.tensor(path, dtype=torch.float32) else: waypoints = path.to(dtype=torch.float32) loss = self._traj_cost.cost_of_recorded_path(waypoints).numpy() if self._traj_cost.cost_map.cfg.semantics: loss -= self._traj_cost.cost_map.cfg.sem_cost_map.negative_reward return loss ## # Eval Statistics ## def eval_statistics(self) -> None: # Evaluate results goal_reached = self.goal_distances < self.distance_tolerance goal_reached_rate = sum(goal_reached) / len(goal_reached) avg_distance_to_goal = sum(self.goal_distances) / len(self.goal_distances) avg_distance_to_goal_reached = sum(self.goal_distances[goal_reached]) / sum(goal_reached) print( "All path segments been passed. 
Results: \nReached goal rate" f" (thres: {self.distance_tolerance}):\t{goal_reached_rate} \nAvg" f" goal-distance (all): \t{avg_distance_to_goal} \nAvg" f" goal-distance (reached):\t{avg_distance_to_goal_reached}" ) self.eval_stats = { "goal_reached_rate": goal_reached_rate, "avg_distance_to_goal_all": avg_distance_to_goal, "avg_distance_to_goal_reached": avg_distance_to_goal_reached, } if self._use_cost_map: within_obs_threshold = np.sum(self.loss_obstacles < self.obs_loss_threshold) / len(self.loss_obstacles) avg_obs_loss = sum(self.loss_obstacles) / len(self.loss_obstacles) avg_obs_loss_reached = sum(self.loss_obstacles[goal_reached]) / sum(goal_reached) max_obs_loss = max(self.loss_obstacles) max_obs_loss_reached = max(self.loss_obstacles[goal_reached]) if sum(goal_reached) > 0 else np.inf print( "Within obs threshold" f" ({self.obs_loss_threshold}):\t{within_obs_threshold} \nObstacle" f" loss (all): \t{avg_obs_loss} \nObstacle loss" f" (reached): \t{avg_obs_loss_reached} \nMax obstacle loss" f" (all): \t{max_obs_loss} \nMax obstacle loss" f" (reached):\t{max_obs_loss_reached}" ) self.eval_stats["avg_obs_loss_all"] = avg_obs_loss self.eval_stats["avg_obs_loss_reached"] = avg_obs_loss_reached self.eval_stats["max_obs_loss_all"] = max_obs_loss self.eval_stats["max_obs_loss_reached"] = max_obs_loss_reached return def save_eval_results(self, model_dir: str, save_name: str) -> None: # save eval results in model yaml yaml_path = model_dir[:-3] + ".yaml" if not os.path.exists(yaml_path): return with open(yaml_path) as file: data: dict = yaml.load(file, Loader=TrainCfgLoader) if "eval" not in data: data["eval"] = {} data["eval"][save_name] = self.eval_stats with open(yaml_path, "w") as file: yaml.dump(data, file) ## # Plotting ## def plt_single_model(self, eval_dir: str, show: bool = True) -> None: # check if directory exists os.makedirs(eval_dir, exist_ok=True) # get unique goal lengths and init buffers unique_goal_length = np.unique(np.round(self.length_goal, 1)) mean_path_extension = [] std_path_extension = [] mean_goal_distance = [] std_goal_distance = [] goal_counts = [] mean_obs_loss = [] std_obs_loss = [] for x in unique_goal_length: # get subset of path predictions with goal length x subset_idx = np.round(self.length_goal, 1) == x mean_path_extension.append(np.mean(self.path_extension[subset_idx])) std_path_extension.append(np.std(self.path_extension[subset_idx])) mean_goal_distance.append(np.mean(self.goal_distances[subset_idx])) std_goal_distance.append(np.std(self.goal_distances[subset_idx])) goal_counts.append(len(self.goal_distances[subset_idx])) if self._use_cost_map: mean_obs_loss.append(np.mean(self.loss_obstacles[subset_idx])) std_obs_loss.append(np.std(self.loss_obstacles[subset_idx])) # plot with the distance to the goal depending on the length between goal and start fig, ax = plt.subplots(figsize=(12, 10)) fig.suptitle("Path Length Increase", fontsize=20) ax.plot( unique_goal_length, mean_path_extension, color="blue", label="Average path length", ) ax.fill_between( unique_goal_length, np.array(mean_path_extension) - np.array(std_path_extension), np.array(mean_path_extension) + np.array(std_path_extension), color="blue", alpha=0.2, label="Uncertainty", ) ax.set_xlabel("Start-Goal Distance", fontsize=16) ax.set_ylabel("Path Length", fontsize=16) ax.set_title( ( "Avg increase of path length is" f" {round(np.mean(self.path_extension), 5)*100:.2f}% for" " successful paths with tolerance of" f" {self.distance_tolerance}" ), fontsize=16, ) ax.tick_params(axis="both", 
which="major", labelsize=14) ax.legend() fig.savefig(os.path.join(eval_dir, "path_length.png")) if show: plt.show() else: plt.close() # plot to compare the increase in path length depending on the distance between goal and start goal_success_mean = np.sum(self.goal_distances < self.distance_tolerance) / len(self.goal_distances) # Create a figure and two axis objects, with the second one sharing the x-axis of the first fig, ax1 = plt.subplots(figsize=(12, 10)) ax2 = ax1.twinx() fig.subplots_adjust(hspace=0.4) # Add some vertical spacing between the two plots # Plot the goal distance data ax1.plot( unique_goal_length, mean_goal_distance, color="blue", label="Average goal distance length", zorder=2, ) ax1.fill_between( unique_goal_length, np.array(mean_goal_distance) - np.array(std_goal_distance), np.array(mean_goal_distance) + np.array(std_goal_distance), color="blue", alpha=0.2, label="Uncertainty", zorder=1, ) ax1.set_xlabel("Start-Goal Distance", fontsize=16) ax1.set_ylabel("Goal Distance", fontsize=16) ax1.set_title( ( f"With a tolerance of {self.distance_tolerance} are" f" {round(goal_success_mean, 5)*100:.2f} % of goals reached" ), fontsize=16, ) ax1.tick_params(axis="both", which="major", labelsize=14) # Plot the goal counts data on the second axis ax2.bar( unique_goal_length, goal_counts, color="red", alpha=0.5, width=0.05, label="Number of samples", zorder=0, ) ax2.set_ylabel("Sample count", fontsize=16) ax2.tick_params(axis="both", which="major", labelsize=14) # Combine the legends from both axes lines, labels = ax1.get_legend_handles_labels() bars, bar_labels = ax2.get_legend_handles_labels() ax2.legend(lines + bars, labels + bar_labels, loc="upper center") plt.suptitle("Goal Distance", fontsize=20) fig.savefig(os.path.join(eval_dir, "goal_distance.png")) if show: plt.show() else: plt.close() if self._use_cost_map: # plot to compare the obs loss depending on the distance between goal and start avg_obs_loss = np.mean(self.loss_obstacles) obs_threshold_success_rate = np.sum(self.loss_obstacles < self.obs_loss_threshold) / len( self.loss_obstacles ) fig, ax = plt.subplots(figsize=(12, 10)) fig.suptitle("Obstacle Loss", fontsize=20) ax.plot( unique_goal_length, mean_obs_loss, color="blue", label="Average obs loss", ) ax.fill_between( unique_goal_length, np.array(mean_obs_loss) - np.array(std_obs_loss), np.array(mean_obs_loss) + np.array(std_obs_loss), color="blue", alpha=0.2, label="Uncertainty", ) ax.set_xlabel("Start-Goal Distance", fontsize=16) ax.set_ylabel("Obstacle Loss", fontsize=16) ax.set_title( ( f"Avg obstacle loss {round(avg_obs_loss, 5):.5f} with" f" {obs_threshold_success_rate}% within obs thres" f" {self.obs_loss_threshold}" ), fontsize=16, ) ax.tick_params(axis="both", which="major", labelsize=14) ax.legend() fig.savefig(os.path.join(eval_dir, "obs_cost.png")) if show: plt.show() else: plt.close() return def plt_comparison( self, length_goal_list: List[np.ndarray], goal_distance_list: List[np.ndarray], path_extension_list: List[np.ndarray], model_dirs: List[str], save_dir: str, obs_loss_list: Optional[List[np.ndarray]] = None, model_names: Optional[List[str]] = None, ) -> None: # path increase plot fig_path, axs_path = plt.subplots(figsize=(12, 10)) fig_path.suptitle("Path Extension", fontsize=24) axs_path.set_xlabel("Start-Goal Distance [m]", fontsize=20) axs_path.set_ylabel("Path Extension [%]", fontsize=20) axs_path.tick_params(axis="both", which="major", labelsize=16) # goal distance plot fig_goal, axs_goal = plt.subplots(figsize=(12, 10)) fig_goal.suptitle("Goal 
Distance", fontsize=24) axs_goal.set_xlabel("Start-Goal Distance [m]", fontsize=20) axs_goal.set_ylabel("Goal Distance [m]", fontsize=20) axs_goal.tick_params(axis="both", which="major", labelsize=16) if self._use_cost_map: assert obs_loss_list is not None, "If cost map is used, obs_loss_list must be provided" # obs loss plot fig_obs, axs_obs = plt.subplots(figsize=(12, 10)) # fig_obs.suptitle("Mean Obstacle Loss Along Path", fontsize=24) axs_obs.set_xlabel("Start-Goal Distance [m]", fontsize=20) axs_obs.set_ylabel("Mean Obstacle Loss", fontsize=20) axs_obs.tick_params(axis="both", which="major", labelsize=16) bar_width = 0.8 / len(length_goal_list) for idx in range(len(length_goal_list)): if model_names is None: model_name = os.path.split(model_dirs[idx])[1] else: model_name = model_names[idx] goal_success_bool = goal_distance_list[idx] < self.distance_tolerance unique_goal_length = np.unique(np.round(length_goal_list[idx], 0)) mean_path_extension = [] std_path_extension = [] mean_goal_distance = [] std_goal_distance = [] mean_obs_loss = [] std_obs_loss = [] goal_length_obs_exists = [] unqiue_goal_length_used = [] for x in unique_goal_length: if x == 0: continue # get subset of path predictions with goal length x subset_idx = np.round(length_goal_list[idx], 0) == x mean_path_extension.append(np.mean(path_extension_list[idx][subset_idx])) std_path_extension.append(np.std(path_extension_list[idx][subset_idx])) mean_goal_distance.append(np.mean(goal_distance_list[idx][subset_idx])) std_goal_distance.append(np.std(goal_distance_list[idx][subset_idx])) if self._use_cost_map: y_obs_subset = obs_loss_list[idx][subset_idx] if len(y_obs_subset) != 0: mean_obs_loss.append(np.mean(y_obs_subset)) std_obs_loss.append(np.std(y_obs_subset)) goal_length_obs_exists.append(x) else: print(f"Warning: No obs loss for {model_name} at goal" f" distance {x}") unqiue_goal_length_used.append(x) unique_goal_length = np.array(unqiue_goal_length_used) goal_length_obs_exists = np.array(goal_length_obs_exists) bar_pos = bar_width / 2 + idx * bar_width - 0.4 # plot to compare the increase in path length depending in on the distance between goal and start for the successful paths avg_increase = np.mean(path_extension_list[idx]) axs_path.bar( unique_goal_length + bar_pos, mean_path_extension, width=bar_width, label=(f"{model_name} (avg {round(avg_increase, 5)*100:.2f} %))"), alpha=0.8, ) # yerr=std_path_extension, # axs_path.plot(goal_length_path_exists, mean_path_extension, label=f'{model_name} ({round(avg_increase, 5)*100:.2f} %))') # axs_path.fill_between(goal_length_path_exists, np.array(mean_path_extension) - np.array(std_path_extension), np.array(mean_path_extension) + np.array(std_path_extension), alpha=0.2) # plot with the distance to the goal depending on the length between goal and start goal_success = np.sum(goal_success_bool) / len(goal_distance_list[idx]) axs_goal.bar( unique_goal_length + bar_pos, mean_goal_distance, width=bar_width, label=(f"{model_name} (success rate" f" {round(goal_success, 5)*100:.2f} %)"), alpha=0.8, ) # yerr=std_goal_distance, # axs_goal.plot(unique_goal_length, mean_goal_distance, label=f'{model_name} ({round(goal_success, 5)*100:.2f} %)') # axs_goal.fill_between(unique_goal_length, np.array(mean_goal_distance) - np.array(std_goal_distance), np.array(mean_goal_distance) + np.array(std_goal_distance), alpha=0.2) if self._use_cost_map: # plot with the distance to the goal depending on the length between goal and start avg_obs_loss = np.mean(obs_loss_list[idx]) axs_obs.bar( 
goal_length_obs_exists + bar_pos, mean_obs_loss, width=bar_width, label=f"{model_name} (avg {round(avg_obs_loss, 5):.3f})", alpha=0.8, ) # yerr=std_obs_loss, # axs_obs.plot(goal_length_obs_exists, mean_obs_loss, label=f'{model_name} ({round(avg_obs_loss, 5):.5f} %)') # axs_obs.fill_between(goal_length_obs_exists, np.array(mean_obs_loss) - np.array(std_obs_loss), np.array(mean_obs_loss) + np.array(std_obs_loss), alpha=0.2) # plot threshold for successful path axs_goal.axhline( y=self.distance_tolerance, color="red", linestyle="--", label="threshold", ) axs_path.legend(fontsize=20) axs_goal.legend(fontsize=20) fig_path.savefig(os.path.join(save_dir, "path_length_comp.png")) fig_goal.savefig(os.path.join(save_dir, "goal_distance_comp.png")) if self._use_cost_map: axs_obs.legend(fontsize=20) fig_obs.savefig(os.path.join(save_dir, "obs_loss_comp.png")) plt.show() return # EoF
18,324
Python
37.906582
195
0.560358
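`BaseEvaluator` is meant to be filled with per-path statistics (start-goal distance, executed path length, path extension, final goal distance, and optionally an obstacle loss) and then queried for aggregate metrics and plots. A minimal hedged sketch with synthetic numbers and no cost map is shown below; normally these buffers are produced by a planner evaluation run.

```python
import numpy as np

# Assumes the viplanner package is installed.
from viplanner.utils.eval_utils import BaseEvaluator

evaluator = BaseEvaluator(distance_tolerance=0.5, obs_loss_threshold=0.3)
evaluator.set_nbr_paths(4)
evaluator.reset()  # allocates the per-path buffers

# Fill the buffers with synthetic results (invented for illustration).
evaluator.length_goal = np.array([2.0, 5.0, 8.0, 11.0])    # start-goal distance
evaluator.length_path = np.array([2.3, 5.4, 9.1, 15.0])    # executed path length
evaluator.path_extension = evaluator.length_path / evaluator.length_goal - 1.0
evaluator.goal_distances = np.array([0.1, 0.3, 0.4, 2.5])  # final distance to goal

evaluator.eval_statistics()                          # prints reach rate + distances
# evaluator.plt_single_model("eval_out", show=False) # optional plots to disk
```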
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/omni/isaac/terrain_generator/extension.py
import omni.ext import omni.ui as ui from .terrain_utils import * from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage from omni.isaac.core.utils.prims import define_prim, get_prim_at_path import omni.usd # Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)` def some_public_function(x: int): print("[omni.isaac.terrain_generator] some_public_function was called with x: ", x) return x ** x # Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be # instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled # on_shutdown() is called. class OmniIsaacTerrain_generatorExtension(omni.ext.IExt): # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. def on_startup(self, ext_id): print("[omni.isaac.terrain_generator] omni isaac terrain_generator startup") self._count = 0 self._window = ui.Window("My Window", width=300, height=300) with self._window.frame: with ui.VStack(): label = ui.Label("") def on_click(): self.get_terrain() # self._count += 1 label.text = "Generate Terrain" def on_reset(): self.clear_terrain() # self._count = 0 label.text = "Clear Stage" on_reset() with ui.HStack(): ui.Button("Add Terrain", clicked_fn=on_click) ui.Button("Clear Stage", clicked_fn=on_reset) def on_shutdown(self): print("[omni.isaac.terrain_generator] omni isaac terrain_generator shutdown") # This deletes the terrain def clear_terrain(self): current_stage = get_current_stage() current_stage.RemovePrim("/World/terrain") # The stuff that makes terrain def get_terrain(self): stage = get_current_stage() # create all available terrain types num_terains = 8 terrain_width = 12. terrain_length = 12. horizontal_scale = 0.25 # [m] vertical_scale = 0.005 # [m] num_rows = int(terrain_width/horizontal_scale) num_cols = int(terrain_length/horizontal_scale) heightfield = np.zeros((num_terains*num_rows, num_cols), dtype=np.int16) def new_sub_terrain(): return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale) # weird heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw # Make a plain slope, need to understand how to control. When deleted makes a flat terrain heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw # Pyramid slope, probably the base for the stairs code heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw # nice square obstacles randomly generated heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw # Nice curvy terrain heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw # Adjust stair step size, how far it goes down or up. 
heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw # Need to figure out how to cahnge step heights and make a Pyramid Stair go up heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw # Stepping Stones need fixing depth heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1., stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5) position = np.array([-6.0, 48.0, 0]) orientation = np.array([0.70711, 0.0, 0.0, -0.70711]) add_terrain_to_stage(stage=stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation) # Error # Cannot load DefinePrim # File "e:\bored engineer github\bored engineer\awesome_terrains\exts\omni.isaac.terrain_generator\omni\isaac\terrain_generator\extension.py", line 4, in <module> # from .terrain_utils import * # File "e:\bored engineer github\bored engineer\awesome_terrains\exts\omni.isaac.terrain_generator\omni\isaac\terrain_generator\terrain_utils.py", line 384, in <module> # terrain = add_terrain_to_stage(stage, vertices, triangles) # File "e:\bored engineer github\bored engineer\awesome_terrains\exts\omni.isaac.terrain_generator\omni\isaac\terrain_generator\terrain_utils.py", line 340, in add_terrain_to_stage # terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh") # AttributeError: 'NoneType' object has no attribute 'DefinePrim'
5,526
Python
52.14423
182
0.666305
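The commented-out traceback at the end of the extension shows `add_terrain_to_stage` being executed at import time of `terrain_utils.py`, when no USD stage is open yet, so `stage.DefinePrim` fails with `'NoneType' object has no attribute 'DefinePrim'`. The guard below is a hedged sketch of the obvious fix (only define the terrain prim once a stage exists); `define_terrain_prim` is a hypothetical helper, not part of the extension, and it reuses the same `get_current_stage` import the extension already has.

```python
from omni.isaac.core.utils.stage import get_current_stage


def define_terrain_prim(prim_path: str = "/World/terrain"):
    """Create the terrain Mesh prim only if a USD stage is currently open."""
    stage = get_current_stage()
    if stage is None:
        # No stage yet (e.g. module imported before a scene was created):
        # bail out instead of crashing with AttributeError on NoneType.
        print("[omni.isaac.terrain_generator] no stage open, skipping terrain creation")
        return None
    return stage.DefinePrim(prim_path, "Mesh")
```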
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/omni/isaac/terrain_generator/create_terrain_demo.py
import os, sys SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.append(SCRIPT_DIR) import omni from omni.isaac.kit import SimulationApp import numpy as np import torch simulation_app = SimulationApp({"headless": False}) from abc import abstractmethod from omni.isaac.core.tasks import BaseTask from omni.isaac.core.prims import RigidPrimView, RigidPrim, XFormPrim from omni.isaac.core import World from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.utils.prims import define_prim, get_prim_at_path from omni.isaac.core.utils.nucleus import find_nucleus_server from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage from omni.isaac.core.materials import PreviewSurface from omni.isaac.cloner import GridCloner from pxr import UsdPhysics, UsdLux, UsdShade, Sdf, Gf, UsdGeom, PhysxSchema from .terrain_utils import * class TerrainCreation(BaseTask): def __init__(self, name, num_envs, num_per_row, env_spacing, config=None, offset=None,) -> None: BaseTask.__init__(self, name=name, offset=offset) self._num_envs = num_envs self._num_per_row = num_per_row self._env_spacing = env_spacing self._device = "cpu" self._cloner = GridCloner(self._env_spacing, self._num_per_row) self._cloner.define_base_env(self.default_base_env_path) define_prim(self.default_zero_env_path) @property def default_base_env_path(self): return "/World/envs" @property def default_zero_env_path(self): return f"{self.default_base_env_path}/env_0" def set_up_scene(self, scene) -> None: self._stage = get_current_stage() distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight")) distantLight.CreateIntensityAttr(2000) self.get_terrain() self.get_ball() super().set_up_scene(scene) prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs) print(f"cloning {self._num_envs} environments...") self._env_pos = self._cloner.clone( source_prim_path="/World/envs/env_0", prim_paths=prim_paths ) return def get_terrain(self): # create all available terrain types num_terains = 8 terrain_width = 12. terrain_length = 12. 
horizontal_scale = 0.25 # [m] vertical_scale = 0.005 # [m] num_rows = int(terrain_width/horizontal_scale) num_cols = int(terrain_length/horizontal_scale) heightfield = np.zeros((num_terains*num_rows, num_cols), dtype=np.int16) def new_sub_terrain(): return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale) heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1., stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5) position = np.array([-6.0, 48.0, 0]) orientation = np.array([0.70711, 0.0, 0.0, -0.70711]) add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation) def get_ball(self): ball = DynamicSphere(prim_path=self.default_zero_env_path + "/ball", name="ball", translation=np.array([0.0, 0.0, 1.0]), mass=0.5, radius=0.2,) def post_reset(self): for i in range(self._num_envs): ball_prim = self._stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}/ball") color = 0.5 + 0.5 * np.random.random(3) visual_material = PreviewSurface(prim_path=f"{self.default_base_env_path}/env_{i}/ball/Looks/visual_material", color=color) binding_api = UsdShade.MaterialBindingAPI(ball_prim) binding_api.Bind(visual_material.material, bindingStrength=UsdShade.Tokens.strongerThanDescendants) def get_observations(self): pass def calculate_metrics(self) -> None: pass def is_done(self) -> None: pass if __name__ == "__main__": world = World( stage_units_in_meters=1.0, rendering_dt=1.0/60.0, backend="torch", device="cpu", ) num_envs = 800 num_per_row = 80 env_spacing = 0.56*2 terrain_creation_task = TerrainCreation(name="TerrainCreation", num_envs=num_envs, num_per_row=num_per_row, env_spacing=env_spacing, ) world.add_task(terrain_creation_task) world.reset() while simulation_app.is_running(): if world.is_playing(): if world.current_time_step_index == 0: world.reset(soft=True) world.step(render=True) else: world.step(render=True) simulation_app.close()
6,310
Python
41.355704
166
0.61775
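Both terrain scripts build one tall int16 heightfield (eight sub-terrains stacked along the rows) and convert it to a triangle mesh, where `horizontal_scale` maps grid indices to meters and `vertical_scale` maps the stored integers to heights. The short sketch below walks through that bookkeeping with the same 12 m x 12 m, 0.25 m / 0.005 m values; the `terrain_utils` helpers themselves are not re-implemented here.

```python
import numpy as np

num_terrains = 8
terrain_width, terrain_length = 12.0, 12.0       # [m] per sub-terrain
horizontal_scale, vertical_scale = 0.25, 0.005   # [m per cell], [m per height unit]

num_rows = int(terrain_width / horizontal_scale)   # 48 cells along x
num_cols = int(terrain_length / horizontal_scale)  # 48 cells along y

# One int16 grid holding all eight sub-terrains stacked along the rows.
heightfield = np.zeros((num_terrains * num_rows, num_cols), dtype=np.int16)

# A stored value of 100 corresponds to 100 * 0.005 m = 0.5 m of height.
heightfield[0:num_rows, :] = 100

# Grid cell (i, j) ends up at world position (i * 0.25 m, j * 0.25 m).
print(heightfield.shape)                   # (384, 48)
print(heightfield[0, 0] * vertical_scale)  # 0.5
```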
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/docs/README.md
# Awesome Terrain Generator Example [omni.isaac.terrain_generator] This extension is intended to help explore how to programmatically generate terrains for OIGE (Omniverse Isaac Gym Reinforcement Learning Environments for Isaac Sim).
237
Markdown
46.599991
167
0.827004
USDSync/MetaCloudExplorer/README.md
# USDSync.com # Meta Cloud Explorer (MCE) # NVIDIA Onmiverse Extension, a Scene Authoring Tool (In Beta Development phase) ![Meta Cloud Explorer](https://github.com/USDSync/MetaCloudExplorer/blob/main/exts/meta.cloud.explorer.azure/data/resources/meta_cloud_explorer_800.png) The true power of the Metaverse is to gain new insights to existing problems by experiencing things in a different way. A simple change in perspective can sometimes work wonders! Meta Cloud Explorer helps Cloud Architects visualize thier cloud infrastructure at scale. It's a dynamic metaverse creation tool for Nvidia's Omniverse. Quickly connect to your Cloud Infrastructure and visualize it in your private Omniverse!* This extension generates digital models of your Cloud Infrastructure that can be used to gain insights to drive better infrastructure, optimized resources, reduced costs, and breakthrough customer experiences. ![Meta Cloud Explorer](https://github.com/USDSync/MetaCloudExplorer/blob/main/exts/meta.cloud.explorer.azure/data/resources/azurescaled.png) **Gain Insight by seeing your infrastructure at scale:** ![Meta Cloud Explorer](https://github.com/USDSync/MetaCloudExplorer/blob/main/exts/meta.cloud.explorer.azure/data/resources/resourcegroups.png) **View resources by Location, Group, Type, Subscription** ![Meta Cloud Explorer](https://github.com/USDSync/MetaCloudExplorer/blob/main/exts/meta.cloud.explorer.azure/data/resources/westus.png) **Optional costs data integrated to groups** ![Meta Cloud Explorer](https://github.com/USDSync/MetaCloudExplorer/blob/main/exts/meta.cloud.explorer.azure/data/resources/costs.png) **Click any shape for more information** ![Meta Cloud Explorer](https://github.com/USDSync/MetaCloudExplorer/blob/main/exts/meta.cloud.explorer.azure/data/resources/CostDetails.png) **Can support multiple clouds!** ![Meta Cloud Explorer](https://github.com/USDSync/MetaCloudExplorer/blob/main/exts/meta.cloud.explorer.azure/data/resources/aws-azure-gcp.png) **Only works with Microsoft Azure Cloud currently* AWS, GCP planned on roadmap. The whole point of Meta Cloud Explorer (MCE) is to let you experience your cloud infrastructure in a new way. The Metaverse is here, it means a lot of different things, but one of the key tenants of the Metaverse is to be able to experience new things, or to experience existing things in a different or new contexts. A Metaverse is just a 3D space you can customize and share, or even visit it with other people. The ability to experience a virtual place with others opens up a world of possibility for training, knowledge sharing and helping others to understand technology. ### Cloud Infrastructure 2023 + Azure, AWS, GCP are massive in reach, with millions of customers, and probably billions of pieces of infrastructure, it's a lot for cloud architect, infrastructure and SRE engineers manage. Cloud Architects have many tools to help manage the complexity and risk of managing cloud operations, infrastructure and deployments at scale. Infrastructure is now managed just like code, in Source Control and connected to powerful Cloud Orchestration software like Cloud Formation, Terraform, Bicep and others, giving Cloud Architects, Engineers and Developers even more powerful tools to help scale and manage the cloud. Existing Web based UIs suffer from "focusing on small groups of trees" which makes it hard to "see the forest" in this context. There is no shortage of "Infrastructure diagram generation" tools that can produce 2d representations of your cloud infrastructure. 
Visio, Lucid Scale and other cloud based diagramming tools help architects manage and understand cloud infrastructure design and architecture. Many diagrams are still manually maintained and time-consuming. Lucid Scale lets you generate a model from your cloud infrastructure like this: ![Current state of 2D diagrams](https://d15shllkswkct0.cloudfront.net/wp-content/blogs.dir/1/files/2021/10/lucidscale-overview.png) ### NVIDIA Omniverse and USD Universal Scene Description Meta Cloud Explorer doesn't replace any of these tools, It's a new tool, a new way to see, understand and experience your cloud infrastructure at scale. Thanks to the power of NVIDIA's Omniverse, we can now create real-time, photo realistic environments modeled directly from your cloud infrastructure! As Meta Cloud Explorer matures, it will help you travel through time, see infrastructure differences and configurations in new ways, even create entirely new architectures. The power to visualize networks, network traffic and "the edge" can help engineers understand cloud security, endpoints and vulnerabilities. In version 1.0 of Meta Cloud Explorer, we've just began to bridge the gap between cloud infrastructure and the metaverse. There is so much that can be done in this new context! v1.0 provides a read-only view of your cloud infrastructure and gives you an easy to use toolbox to create a simulated world that represents your cloud. Future versions will let you create infrastructure from template libraries, learn about new architecture techniques and simulate design changes. USDSync.com aims to create a SaaS based "AzureverseAsAService", where we can host your "Azureverse" live and in sync with your real cloud 24x7. No more scene composition and design, USDSync.com can host these resources and keep them in sync automatically. ### The right tool for the right job Have you ever tried to cleanup your hard drive? Windows File Explorer suffers from the same problem as the Azure, AWS and GCP UI portals. You are always in just one folder, or just one group or just one project, it's really hard just to understand the scale of the whole thing! Want to understand "Whats taking up space on your hard disk?"... Good luck... Yes, you can right click on every folder and view usage, but it's just not as easy as it should be! - We just keep running out of space and adding bigger and bigger hard drives! Ever used WinDirStat? WinDirStat is a free program that shows visually what is taking up space on your hard drive allowing you to gain insights that windows just doesn't provide. Once it has scanned your drive it visually shows you all the files on your disk, in a bin-packed graph. This graph allows you to easily see and understand what files and folders are taking up the space on your disk. Look what a simple change in context does for you?!?! ![MCE is like the WinDirStat of Cloud Infrastructure](https://github.com/USDSync/MetaCloudExplorer/blob/ab0d10ca2035e5db79b426425c54b79eb70ad4d9/exts/meta.cloud.explorer.azure/data/resources/windirstat.png) Think of MCE as the "WinDirStat" of your Azure,. AWS, GCP cloud infrastructure.. just another tool, but wow, the instant insights, change in context, really helps sometimes. I honestly don't know how you would figure out whats taking up space on your hard disk without a tool like WinDirStat. Look at the difference between looking at your infrastructure in Azure, AWS or GCP, vs looking at it in MCE! 
While the context is obviously a bit different and MCE does not replace these tools at all, you can immediately gain insight! ### Azure Portal ![](https://github.com/USDSync/MetaCloudExplorer/blob/0f864116d2b2ea3e65532fdf9f1b7105e79158ab/exts/meta.cloud.explorer.azure/data/resources/azure_resource_list.png) ### AWS Portal ![](https://github.com/USDSync/MetaCloudExplorer/blob/0f864116d2b2ea3e65532fdf9f1b7105e79158ab/exts/meta.cloud.explorer.azure/data/resources/aws_services_list.png) ### GCP Portal ![](https://github.com/USDSync/MetaCloudExplorer/blob/0f864116d2b2ea3e65532fdf9f1b7105e79158ab/exts/meta.cloud.explorer.azure/data/resources/google_cloud_ui.png) ### I just want to see ALL MY INFRASTRUCUTRE, in ONE PLACE, RIGHT NOW! I don't always want to just look at one resource group at a time. I don't want to endlessly filter lists or search for things. I don't always want to just look at diagrams or templates or even the code that built this infrastructure, I JUST WANT TO SEE IT! In one place, like it's a place I could go and if its virtual, be able to rearrange it instantly! I rest my case, may I present **Meta Cloud Explorer**!! :) ### Meta Cloud Explorer Extension in Omniverse! ![](https://github.com/USDSync/MetaCloudExplorer/blob/0f864116d2b2ea3e65532fdf9f1b7105e79158ab/exts/meta.cloud.explorer.azure/data/resources/resourcegroups.png) This is just "a picture" of my Azureverse. It's really a digital world I can travel around inside, rearrange, re-group, change groupings, costs, layouts and more. It's an easy way to just see your cloud infrastructure, right now, visually.. it's actually quite cool ;) MCE even works in Omniverse XR! Just save your USD scene and open it in Omniverse XR, strap on your VR headset and immerse yourself inside your cloud. The future is here and it's time you have a tool that lets you quickly understand your cloud infrastructure, costs and resources distribution. To the Metaverse and beyond! (kinda dumb I know, but it's better than Infinity, heh) Goto the Wiki to Get Started! https://github.com/USDSync/MetaCloudExplorer/wiki
9,153
Markdown
93.371133
429
0.799847
USDSync/MetaCloudExplorer/azure-pipelines.yml
# Python package # Create and test a Python package on multiple Python versions. # Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more: # https://docs.microsoft.com/azure/devops/pipelines/languages/python trigger: - main pool: vmImage: ubuntu-latest strategy: matrix: Python27: python.version: '2.7' Python35: python.version: '3.5' Python36: python.version: '3.6' Python37: python.version: '3.7' steps: - task: UsePythonVersion@0 inputs: versionSpec: '$(python.version)' displayName: 'Use Python $(python.version)' - script: | python -m pip install --upgrade pip pip install -r requirements.txt displayName: 'Install dependencies' - script: | pip install pytest pytest-azurepipelines pytest displayName: 'pytest'
849
YAML
21.972972
113
0.696113
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/csv_data_manager.py
# Selection UI window for importing CSV files import carb from omni.kit.window.file_importer import get_file_importer import os.path import asyncio from pathlib import Path # external python lib import csv import itertools from .data_store import DataStore from .prim_utils import cleanup_prim_path import omni.kit.notification_manager as nm #This class is designed to import data from 3 input files #This file acts like a data provider for the data_manager class CSVDataManager(): def __init__(self): self._dataStore = DataStore.instance() # Get A Singleton instance, store data here # limit the number of rows read self.max_elements = 5000 #specify the filesnames to load def loadFilesManual(self, grpFile:str, resFile:str): self.load_grp_file_manual(grpFile) self.load_res_file_manual(resFile) #Load all the data from CSV files and process it def loadFiles(self): self.load_grp_file() self.load_res_file() #Resource Groups File Import #NAME,SUBSCRIPTION,LOCATION def load_grp_file_manual(self, fileName): i=1 with open(fileName, encoding='utf-8-sig', newline='') as csvfile: reader = csv.DictReader(csvfile, delimiter=',') for row in reader: name = row["NAME"] subs = row["SUBSCRIPTION"] location = row["LOCATION"] grp = {name:{"name":name, "subs": subs, "location":location}} self._dataStore._groups.update(grp) i=i+1 if i > self.max_elements: return self.sendNotify("MCE: Azure groups loaded: " + str(len(self._dataStore._groups)), nm.NotificationStatus.INFO) #Groups File Import def load_grp_file(self): if os.path.exists(self._dataStore._rg_csv_file_path): self.load_grp_file_manual(self._dataStore._rg_csv_file_path) # Read CSV Resources file # Expects fields: # NAME,TYPE,RESOURCE GROUP,LOCATION,SUBSCRIPTION, LMCOST def load_res_file_manual(self, fileName): i=1 with open(fileName, encoding='utf-8-sig') as file: reader = csv.DictReader(file, delimiter=',') for row in reader: name = row["NAME"] type = row["TYPE"] group = row["RESOURCE GROUP"] location = row["LOCATION"] subscription = row["SUBSCRIPTION"] lmcost = row["LMCOST"] #fix spacing, control chars early name = cleanup_prim_path(self, Name=name) self._dataStore._resources[name] = {"name":name, "type": type, "group": group, "location":location, "subscription":subscription, "lmcost": lmcost} i=i+1 if i > self.max_elements: return self.sendNotify("MCE: Azure resources loaded: " + str(len(self._dataStore._resources)), nm.NotificationStatus.INFO) #Resources File Import def load_res_file(self): # check that CSV exists if os.path.exists(self._dataStore._rs_csv_file_path): self.load_res_file_manual(self._dataStore._rs_csv_file_path) # Handles the click of the Load button for file selection dialog def select_file(self, fileType: str): self.file_importer = get_file_importer() if fileType == "rg": self.file_importer.show_window( title="Select a CSV File", import_button_label="Select", import_handler=self._on_click_rg_open, file_extension_types=[(".csv", "CSV Files (*.csv)")], file_filter_handler=self._on_filter_item ) if fileType == "res": self.file_importer.show_window( title="Select a CSV File", import_button_label="Select", import_handler=self._on_click_res_open, file_extension_types=[(".csv", "CSV Files (*.csv)")], file_filter_handler=self._on_filter_item ) if fileType == "bgl": self.file_importer.show_window( title="Select a png image file", import_button_label="Select", import_handler=self._on_click_bgl_open, file_extension_types=[(".png", "PNG Files (*.png)")], file_filter_handler=self._on_filter_item ) if fileType == "bgm": self.file_importer.show_window( 
title="Select a png image file", import_button_label="Select", import_handler=self._on_click_bgm_open, file_extension_types=[(".png", "PNG Files (*.png)")], file_filter_handler=self._on_filter_item ) if fileType == "bgh": self.file_importer.show_window( title="Select a png image file", import_button_label="Select", import_handler=self._on_click_bgh_open, file_extension_types=[(".png", "PNG Files (*.png)")], file_filter_handler=self._on_filter_item ) # Handles the click of the open button within the file importer dialog def _on_click_rg_open(self, filename: str, dirname: str, selections): # File name should not be empty. filename = filename.strip() if not filename: carb.log_warn(f"Filename must be provided.") return # create the full path to csv file if dirname: fullpath = f"{dirname}/{filename}" else: fullpath = filename self._dataStore._rg_csv_file_path = fullpath self._dataStore._rg_csv_field_model.set_value(str(fullpath)) # Handles the click of the open button within the file importer dialog def _on_click_res_open(self, filename: str, dirname: str, selections): # File name should not be empty. filename = filename.strip() if not filename: carb.log_warn(f"Filename must be provided.") return # create the full path to csv file if dirname: fullpath = f"{dirname}/{filename}" else: fullpath = filename self._dataStore._rs_csv_file_path = fullpath self._dataStore._rs_csv_field_model.set_value(str(fullpath)) # Handles the click of the open button within the file importer dialog def _on_click_bgl_open(self, filename: str, dirname: str, selections): # File name should not be empty. filename = filename.strip() if not filename: carb.log_warn(f"Filename must be provided.") return # create the full path to csv file if dirname: fullpath = f"{dirname}/{filename}" else: fullpath = filename self._dataStore._bgl_file_path = fullpath self._dataStore._bgl_field_model.set_value(str(fullpath)) self._dataStore.Save_Config_Data() # Handles the click of the open button within the file importer dialog def _on_click_bgm_open(self, filename: str, dirname: str, selections): # File name should not be empty. filename = filename.strip() if not filename: carb.log_warn(f"Filename must be provided.") return # create the full path to csv file if dirname: fullpath = f"{dirname}/{filename}" else: fullpath = filename self._dataStore._bgm_file_path = fullpath self._dataStore._bgm_field_model.set_value(str(fullpath)) self._dataStore.Save_Config_Data() # Handles the click of the open button within the file importer dialog def _on_click_bgh_open(self, filename: str, dirname: str, selections): # File name should not be empty. 
filename = filename.strip() if not filename: carb.log_warn(f"Filename must be provided.") return # create the full path to csv file if dirname: fullpath = f"{dirname}/{filename}" else: fullpath = filename self._dataStore._bgh_file_path = fullpath self._dataStore._bgh_field_model.set_value(str(fullpath)) self._dataStore.Save_Config_Data() # Handles the filtering of files within the file importer dialog def _on_filter_item(self, filename: str, filter_postfix: str, filter_ext: str) -> bool: if not filename: return True # Show only .csv files _, ext = os.path.splitext(filename) if ext == filter_ext: return True else: return False def sendNotify(self, message:str, status:nm.NotificationStatus): # https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager# import omni.kit.notification_manager as nm ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok) nm.post_notification( message, hide_after_timeout=True, duration=3, status=status, button_infos=[], ) def clicked_ok(): carb.log_info("User clicked ok")
9,552
Python
35.323194
162
0.573702
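The importer above reads two flat CSV layouts: groups (NAME,SUBSCRIPTION,LOCATION) and resources (NAME,TYPE,RESOURCE GROUP,LOCATION,SUBSCRIPTION,LMCOST). A minimal standalone sketch of that parsing, with hypothetical file names and plain dicts standing in for the DataStore singleton:

# Standalone sketch of the CSV parsing used by CSVDataManager (file names are assumptions).
import csv

def load_groups(path: str, max_elements: int = 5000) -> dict:
    """Parse a groups CSV with NAME,SUBSCRIPTION,LOCATION columns into a dict keyed by name."""
    groups = {}
    with open(path, encoding="utf-8-sig", newline="") as f:
        for i, row in enumerate(csv.DictReader(f), start=1):
            groups[row["NAME"]] = {
                "name": row["NAME"],
                "subs": row["SUBSCRIPTION"],
                "location": row["LOCATION"],
            }
            if i >= max_elements:  # same row cap as the importer above
                break
    return groups

def load_resources(path: str, max_elements: int = 5000) -> dict:
    """Parse a resources CSV with NAME,TYPE,RESOURCE GROUP,LOCATION,SUBSCRIPTION,LMCOST columns."""
    resources = {}
    with open(path, encoding="utf-8-sig", newline="") as f:
        for i, row in enumerate(csv.DictReader(f), start=1):
            resources[row["NAME"]] = {
                "name": row["NAME"],
                "type": row["TYPE"],
                "group": row["RESOURCE GROUP"],
                "location": row["LOCATION"],
                "subscription": row["SUBSCRIPTION"],
                "lmcost": row["LMCOST"],
            }
            if i >= max_elements:
                break
    return resources

if __name__ == "__main__":
    # Hypothetical import files; the extension ships similar samples under data/import.
    print(len(load_groups("TestShapes_RG.csv")), "groups")
    print(len(load_resources("TestShapes_all.csv")), "resources")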
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/widget_info_manipulator.py
## Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. ## ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. ## __all__ = ["WidgetInfoManipulator"] from omni.ui import color as cl from omni.ui import scene as sc import omni.ui as ui import carb class _ViewportLegacyDisableSelection: """Disables selection in the Viewport Legacy""" def __init__(self): self._focused_windows = None focused_windows = [] try: # For some reason is_focused may return False, when a Window is definitely in fact is the focused window! # And there's no good solution to this when mutliple Viewport-1 instances are open; so we just have to # operate on all Viewports for a given usd_context. import omni.kit.viewport_legacy as vp vpi = vp.acquire_viewport_interface() for instance in vpi.get_instance_list(): window = vpi.get_viewport_window(instance) if not window: continue focused_windows.append(window) if focused_windows: self._focused_windows = focused_windows for window in self._focused_windows: # Disable the selection_rect, but enable_picking for snapping window.disable_selection_rect(True) except Exception: pass class _DragPrioritize(sc.GestureManager): """Refuses preventing _DragGesture.""" def can_be_prevented(self, gesture): # Never prevent in the middle of drag return gesture.state != sc.GestureState.CHANGED def should_prevent(self, gesture, preventer): if preventer.state == sc.GestureState.BEGAN or preventer.state == sc.GestureState.CHANGED: return True class _DragGesture(sc.DragGesture): """"Gesture to disable rectangle selection in the viewport legacy""" def __init__(self): super().__init__(manager=_DragPrioritize()) def on_began(self): # When the user drags the slider, we don't want to see the selection # rect. In Viewport Next, it works well automatically because the # selection rect is a manipulator with its gesture, and we add the # slider manipulator to the same SceneView. # In Viewport Legacy, the selection rect is not a manipulator. Thus it's # not disabled automatically, and we need to disable it with the code. 
self.__disable_selection = _ViewportLegacyDisableSelection() def on_ended(self): # This re-enables the selection in the Viewport Legacy self.__disable_selection = None class WidgetInfoManipulator(sc.Manipulator): def __init__(self, **kwargs): super().__init__(**kwargs) #self.destroy() self._radius = 2 self._distance_to_top = 5 self._thickness = 2 self._radius_hovered = 20 def destroy(self): self._root = None self._path_label = None self._name_label = None self._grp_label = None self._type_label = None self._location_label = None self._sub_label = None self._cost_label = None def _on_build_widgets(self): carb.log_info("WidgetInfoManipulator - on_build_widgets") with ui.ZStack(): ui.Rectangle( style={ "background_color": cl(0.2), "border_color": cl(0.7), "border_width": 2, "border_radius": 4, } ) with ui.VStack(style={"font_size": 24}): ui.Spacer(height=4) with ui.ZStack(style={"margin": 1}, height=30): ui.Rectangle( style={ "background_color": cl(0.0), } ) ui.Line(style={"color": cl(0.7), "border_width": 2}, alignment=ui.Alignment.BOTTOM) ui.Label("MCE: Resource Information", height=0, alignment=ui.Alignment.LEFT) ui.Spacer(height=4) self._path_label = ui.Label("Path:", height=0, alignment=ui.Alignment.LEFT) self._name_label = ui.Label("Name:", height=0, alignment=ui.Alignment.LEFT) self._grp_label = ui.Label("RGrp:", height=0, alignment=ui.Alignment.LEFT) self._type_label = ui.Label("Type:", height=0, alignment=ui.Alignment.LEFT) self._location_label = ui.Label("Location:", height=0, alignment=ui.Alignment.LEFT) self._sub_label = ui.Label("Sub:", height=0, alignment=ui.Alignment.LEFT) self._cost_label = ui.Label("Cost:", height=0, alignment=ui.Alignment.LEFT) self.on_model_updated(None) # Additional gesture that prevents Viewport Legacy selection self._widget.gestures += [_DragGesture()] def on_build(self): carb.log_info("WidgetInfoManipulator - on_build") """Called when the model is chenged and rebuilds the whole slider""" self._root = sc.Transform(visible=False) with self._root: with sc.Transform(scale_to=sc.Space.SCREEN): with sc.Transform(transform=sc.Matrix44.get_translation_matrix(0, 100, 0)): # Label with sc.Transform(look_at=sc.Transform.LookAt.CAMERA): self._widget = sc.Widget(600, 250, update_policy=sc.Widget.UpdatePolicy.ON_MOUSE_HOVERED) self._widget.frame.set_build_fn(self._on_build_widgets) self._on_build_widgets() def on_model_updated(self, _): try: # if we don't have selection then show nothing if not self.model or not self.model.get_item("name"): if hasattr(self, "_root"): self._root.visible = False return else: # Update the shapes position = self.model.get_as_floats(self.model.get_item("position")) self._root.transform = sc.Matrix44.get_translation_matrix(*position) self._root.visible = True except: return #how to select parent ? 
# name = self.model.get_item('name') # if name.find("Collision") != -1: # return # Update the shape name if hasattr(self, "_name_label"): name = self.model.get_item('name') infoBlurb = name.replace("/World/RGrps/", "") infoBlurb = infoBlurb.replace("/World/Subs/", "") infoBlurb = infoBlurb.replace("/World/Locs/", "") infoBlurb = infoBlurb.replace("/World/Types/", "") try: self._path_label.text = f"{infoBlurb}" except: self._path_label = ui.Label("Path:", height=20, alignment=ui.Alignment.LEFT) try: self._name_label.text = "Name: " + self.model.get_custom('res_name') except: self._name_label = ui.Label("Name:" , height=40, alignment=ui.Alignment.LEFT) try: self._grp_label.text = "ResGrp: " + self.model.get_custom('res_grp') except: self._grp_label = ui.Label("RGrp:", height=60, alignment=ui.Alignment.LEFT) try: self._type_label.text = "Type: " + self.model.get_custom('res_type') except: self._type_label = ui.Label("Type: ", height=80, alignment=ui.Alignment.LEFT) try: self._location_label.text = "Location: " + self.model.get_custom('res_loc') except: self._location_label = ui.Label("Location: ", height=100, alignment=ui.Alignment.LEFT) try: self._sub_label.text = "Sub: " + self.model.get_custom('res_sub') except: self._sub_label = ui.Label("Sub: " , height=120, alignment=ui.Alignment.LEFT) try: self._cost_label.text = "Cost: " + self.model.get_custom('res_cost') except: self._cost_label = ui.Label("Cost:", height=140, alignment=ui.Alignment.LEFT)
8,503
Python
40.082125
117
0.57662
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/button.py
from .constant import COLORS, LightColors, DarkColors, MouseKey from .style import get_ui_style from .rectangle import DashRectangle, OpaqueRectangle from omni import ui class InvisibleButton(ui.Button): STYLE = { "InvisibleButton": {"background_color": COLORS.TRANSPARENT}, "InvisibleButton.Label": {"color": COLORS.TRANSPARENT}, } def __init__(self, *arg, **kwargs): kwargs["style"] = self.STYLE kwargs["style_type_name_override"] = "InvisibleButton" super().__init__("##INVISIBLE", **kwargs) class DashButton: def __init__( self, height=0, name=None, image_source=None, image_size=16, image_padding=7, dash_padding_x=2, padding=6, clicked_fn=None, alignment=ui.Alignment.LEFT, ): self._on_clicked_fn = clicked_fn with ui.ZStack(height=height): self._rectangle = OpaqueRectangle(height=height, name=name, style_type_name_override="Button") with ui.HStack(spacing=0): ui.Spacer(width=dash_padding_x + padding) if alignment == ui.Alignment.CENTER: ui.Spacer() self._build_image_label( image_source=image_source, name=name, image_size=image_size, image_padding=image_padding ) ui.Spacer() elif alignment == ui.Alignment.RIGHT: ui.Spacer() self._build_image_label( image_source=image_source, name=name, image_size=image_size, image_padding=image_padding ) else: self._build_image_label( image_source=image_source, name=name, image_size=image_size, image_padding=image_padding ) ui.Spacer() ui.Spacer(width=dash_padding_x) DashRectangle(500, height, padding_x=dash_padding_x) if clicked_fn: self._rectangle.set_mouse_pressed_fn(lambda x, y, btn, flag: self._on_clicked()) @property def enabled(self): return self._label.enabled @enabled.setter def enabled(self, value): self._label.enabled = value def _build_image_label(self, image_source=None, name=None, image_size=16, image_padding=7): if image_source: with ui.VStack(width=image_size + 2 * image_padding): ui.Spacer() ui.Image( image_source, width=image_size, height=image_size, name=name, style_type_name_override="Button.Image", ) ui.Spacer() self._label = ui.Label("Add", width=0, name=name, style_type_name_override="Button.Label") def _on_clicked(self): if self._label.enabled: if self._on_clicked_fn is not None: self._on_clicked_fn() class ImageButton: LIGHT_STYLE = { "ImageButton": {"background_color": COLORS.TRANSPARENT, "border_width": 0, "border_radius": 2.0}, "ImageButton:hovered": {"background_color": LightColors.ButtonHovered}, "ImageButton:pressed": {"background_color": LightColors.ButtonPressed}, "ImageButton:selected": {"background_color": LightColors.ButtonSelected}, } DARK_STYLE = { "ImageButton": {"background_color": COLORS.TRANSPARENT, "border_width": 0, "border_radius": 2.0}, "ImageButton:hovered": {"background_color": 0xFF373737}, "ImageButton:selected": {"background_color": 0xFF1F2123}, } UI_STYLES = {"NvidiaLight": LIGHT_STYLE, "NvidiaDark": DARK_STYLE} def __init__( self, name, width, height, image, clicked_fn, tooltip=None, visible=True, enabled=True, activated=False, tooltip_fn=None, ): self._name = name self._width = width self._height = height self._tooltip = tooltip self._tooltip_fn = tooltip_fn self._visible = visible self._enabled = enabled self._image = image self._clicked_fn = clicked_fn self._activated = activated self._panel = None self._bkground_widget = None self._image_widget = None self._mouse_x = 0 self._mouse_y = 0 def create(self, style=None, padding_x=2, padding_y=2): ww = self.get_width() hh = self.get_height() if style is None: style = ImageButton.UI_STYLES[get_ui_style()] self._panel = ui.ZStack(spacing=0, width=ww, height=hh, 
style=style) with self._panel: with ui.Placer(offset_x=0, offset_y=0): self._bkground_widget = ui.Rectangle( name=self._name, style_type_name_override="ImageButton", width=ww, height=hh ) self._bkground_widget.visible = self._visible and self._enabled with ui.Placer(offset_x=padding_x, offset_y=padding_y): self._image_widget = ui.Image( self._image, width=ww - padding_x * 2, height=hh - padding_y * 2, fill_policy=ui.FillPolicy.STRETCH, mouse_pressed_fn=(lambda x, y, key, m: self._on_mouse_pressed(x, y, key)), mouse_released_fn=(lambda x, y, key, m: self._on_mouse_released(x, y, key)), opaque_for_mouse_events=True, ) if self._bkground_widget is None or self._image_widget is None: return if self._tooltip: self._image_widget.set_tooltip(self._tooltip) if self._tooltip_fn: self._tooltip_fn(self._image_widget, self._tooltip) if not self._enabled: self._bkground_widget.enabled = False self._image_widget.enabled = False def destroy(self): if self._panel: self._panel.clear() if self._bkground_widget: self._bkground_widget = None if self._image_widget: self._image_widget = None @property def enabled(self): return self._enabled @enabled.setter def enabled(self, value): self.enable(value) def get_width(self): return self._width def get_height(self): return self._height def get_widget_pos(self): x = self._bkground_widget.screen_position_x y = self._bkground_widget.screen_position_y return (x, y) def enable(self, enabled): if self._enabled != enabled: self._enabled = enabled self._bkground_widget.visible = enabled and self._visible self._image_widget.enabled = enabled return False def set_tooltip(self, tooltip): self._tooltip = tooltip if self._image_widget is not None: self._image_widget.set_tooltip(self._tooltip) def set_tooltip_fn(self, tooltip_fn: callable): self._tooltip_fn = tooltip_fn if self._image_widget is not None: self._image_widget.set_tooltip_fn(lambda w=self._image_widget, name=self._tooltip: tooltip_fn(w, name)) def is_visible(self): return self._visible def set_visible(self, visible=True): if self._visible != visible: self._visible = visible self._bkground_widget.visible = visible and self._enabled self._image_widget.visible = visible def identify(self, name): return self._name == name def get_name(self): return self._name def is_activated(self): return self._activated def activate(self, activated=True): if self._activated == activated: return False self._activated = activated if self._bkground_widget is not None: self._bkground_widget.selected = activated def set_image(self, image): if self._image != image: self._image = image self._image_widget.source_url = image return False def _on_mouse_pressed(self, x, y, key): if not self._enabled: return # For left button, we do trigger the click event on mouse_released. # For other buttons, we trigger the click event right now since Widget will never has # mouse_released event for any buttons other than left. 
if key != MouseKey.LEFT: self._clicked_fn(key, x, y) else: self._mouse_x = x self._mouse_y = y def _on_mouse_released(self, x, y, key): if self._enabled: if key == MouseKey.LEFT: self._clicked_fn(MouseKey.LEFT, x, y) class SimpleImageButton(ImageButton): def __init__(self, image, size, clicked_fn=None, name=None, style=None, padding=2): self._on_clicked_fn = clicked_fn if name is None: name = "default_image_btn" super().__init__(name, size, size, image, self._on_clicked) self.create(style=style, padding_x=padding, padding_y=padding) @property def clicked_fn(self): return self._on_clicked_fn @clicked_fn.setter def clicked_fn(self, value): self._on_clicked_fn = None def _on_clicked(self, button, x, y): if self._on_clicked_fn: self._on_clicked_fn() class BoolImageButton(ImageButton): def __init__(self, true_image, false_image, size, state=True, clicked_fn=None): self._true_image = true_image self._false_image = false_image self._state = state self._on_clicked_fn = clicked_fn super().__init__("default_image_btn", size, size, self._get_image(), self._on_clicked) self.create() @property def state(self): return self._state @state.setter def state(self, value): self.set_state(value, notify=False) def set_state(self, state, notify=False): self._state = state self.set_image(self._get_image()) if notify and self._on_clicked_fn: self._on_clicked_fn(self._state) def _on_clicked(self, button, x, y): self.set_state(not self._state, notify=True) def _get_image(self): return self._true_image if self._state else self._false_image
10,437
Python
32.242038
115
0.566734
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/group_type.py
from .group_base import GroupBase from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf from .math_utils import calcPlaneSizeForGroup from .prim_utils import cleanup_prim_path import locale import asyncio import carb import omni.client import omni.kit.app import omni.ui as ui import omni.usd import omni.kit.commands class TypeGrpView(GroupBase): def __init__(self, viewPath:str, scale:float, upAxis:str, shapeUpAxis:str, symPlanes:bool, binPack:bool): self._scale = scale self._upAxis = upAxis self._shapeUpAxis = shapeUpAxis self._view_path = viewPath self._symPlanes = symPlanes self._binPack = binPack super().__init__() def calcGroupPlaneSizes(self): self._dataStore._lcl_groups = [] self._dataStore._lcl_sizes = [] if len(self._dataStore._type_count) == 0: self._dataManager.refresh_data() #check it again if len(self._dataStore._type_count) == 0: return 0 # ---------- NO DATA #Clone the location groups gpz = self._dataStore._type_count.copy() #How big should the groups be? for grp in gpz: size = calcPlaneSizeForGroup( scaleFactor=self._scale, resourceCount=self._dataStore._type_count.get(grp) ) #mixed plane sizes self._dataStore._lcl_sizes.append(size) grp = cleanup_prim_path(self, grp) self._dataStore._lcl_groups.append({ "group":grp, "size":size }) #Should the groups all be the same size ? if self._symPlanes: self._dataStore._lcl_sizes.sort(reverse=True) maxPlaneSize = self._dataStore._lcl_sizes[0] #largest plane groupCount = len(self._dataStore._lcl_sizes) #count of groups #Reset plane sizes self._dataStore._lcl_sizes = [] for count in range(0,groupCount): self._dataStore._lcl_sizes.append(maxPlaneSize) self._dataStore._lcl_groups = [] for grp in gpz: self._dataStore._lcl_groups.append({ "group":grp, "size":maxPlaneSize }) def calulateCosts(self): for g in self._dataStore._lcl_groups: #Get the cost by resource group locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' ) try: self._cost = str(locale.currency(self._dataStore._type_cost[g])) except: self._cost = "" # blank not 0, blank means dont show it at all def selectGroupPrims(self): self.paths = [] base = Sdf.Path("/World/Types") for grp in self._dataStore.map_group.keys(): grp_path = base.AppendPath(cleanup_prim_path(self, grp)) self.paths.append(str(grp_path)) omni.kit.commands.execute('SelectPrimsCommand', old_selected_paths=[], new_selected_paths=self.paths, expand_in_stage=True) #Abstact to load resources def loadResources(self): self.view_path = Sdf.Path(self.root_path.AppendPath(self._view_path)) if (len(self._dataStore._lcl_groups)) >0 : #Cycle all the loaded groups for grp in self._dataStore._lcl_groups: carb.log_info(grp["group"]) #Cleanup the group name for a prim path group_prim_path = self.view_path.AppendPath(grp["group"]) #match the group to the resource map for key, values in self._dataStore._map_type.items(): #Is this the group? 
if key == grp["group"]: asyncio.ensure_future(self.loadGroupResources(key, group_prim_path, values)) def selectGroupPrims(self): self.paths = [] stage = omni.usd.get_context().get_stage() base = Sdf.Path("/World/Types") curr_prim = stage.GetPrimAtPath(base) for prim in Usd.PrimRange(curr_prim): # only process shapes and meshes tmp_path = str(prim.GetPath()) if '/CollisionMesh' not in tmp_path: if '/CollisionPlane' not in tmp_path: self.paths.append(tmp_path) # for grp in self._dataStore._map_subscription.keys(): # grp_path = base.AppendPath(cleanup_prim_path(self, grp)) # self.paths.append(str(grp_path)) omni.kit.commands.execute('SelectPrimsCommand', old_selected_paths=[], new_selected_paths=self.paths, expand_in_stage=True)
4,647
Python
31.732394
109
0.572628
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/scatter_on_planes.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["scatterOnFixedPlane"]

from typing import List, Optional
import random

from pxr import Gf


def scatterOnFixedPlane(
    count: List[int],
    distance: List[float],
    scaleFactor:float=1.0
):
    """
    Returns a dict mapping ids to Gf.Vec3f positions used to arrange multiple objects.

    ### Arguments:

        `count: List[int]`
            Number of positions to generate per axis

        `distance: List[float]`
            The distance between objects per axis
    """

    vectors = {}
    id_cnt = 0
    for i in range(count[0]):
        x = (i - 0.5 * (count[0] - 1)) * distance[0]*scaleFactor

        for j in range(count[1]):
            y = (j - 0.5 * (count[1] - 1)) * distance[1]*scaleFactor

            for k in range(count[2]):
                z = (k - 0.5 * (count[2] - 1)) * distance[2]*scaleFactor

                #yield([x, y, z])
                vec_id = id_cnt
                vec = {vec_id: Gf.Vec3f(x,y,z)}
                vectors.update(vec)
                id_cnt = id_cnt +1

    return vectors
1,510
Python
25.982142
76
0.602649
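scatterOnFixedPlane above builds a grid of positions centered on the origin. A plain-Python sketch of the same layout math, returning tuples instead of Gf.Vec3f so it runs without the USD runtime:

# Plain-Python sketch of the centered-grid layout computed by scatterOnFixedPlane.
from typing import Dict, List, Tuple

def fixed_plane_positions(count: List[int], distance: List[float], scale: float = 1.0) -> Dict[int, Tuple[float, float, float]]:
    positions = {}
    idx = 0
    for i in range(count[0]):
        x = (i - 0.5 * (count[0] - 1)) * distance[0] * scale
        for j in range(count[1]):
            y = (j - 0.5 * (count[1] - 1)) * distance[1] * scale
            for k in range(count[2]):
                z = (k - 0.5 * (count[2] - 1)) * distance[2] * scale
                positions[idx] = (x, y, z)  # grid is centered on the origin
                idx += 1
    return positions

# A 2x2 grid spaced 100 units apart yields the four corners around the origin.
assert fixed_plane_positions([2, 2, 1], [100.0, 100.0, 100.0])[0] == (-50.0, -50.0, 0.0)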
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/style_button.py
__all__ = ["button_style"] button_styles = { "Button": { "border_width": 0.5, "border_radius": 0.0, "margin": 4.0, "padding": 4.0, "font_size" : 20 }, "Button::subs": { "background_color": 0x0096C8FA, "background_gradient_color": 0xFFFAB26D, "border_color": 0xFFFD761D, }, "Button.Label::subs": { "color": 0xFFFFFFFF, "font_size" : 16 }, "Button::subs:hovered": { "background_color": 0xFFFF6E00, "background_gradient_color": 0xFFFFAE5A }, "Button::subs:pressed": { "background_color": 0xFFFAB26D, "background_gradient_color": 0xFFFF7E09 }, "Button::clear": { "background_color": 0xFFFF7E09, "background_gradient_color": 0xFFFAB26D, "border_color": 0xFFFD761D, }, "Button.Label::clear": { "color": 0xFFFFFFFF, "font_size" : 16 }, "Button::rs": { "background_color": 0xFFFF7E09, "background_gradient_color": 0xFFFAB26D, "border_color": 0xFFFD761D, }, "Button::rs:hovered": { "background_color": 0xFFFF6E00, "background_gradient_color": 0xFFFFAE5A, "border_color": 0xFFFD761D, }, "Button::rs:pressed": { "background_color": 0xFFFAB26D, "background_gradient_color": 0xFFFF7E09, "border_color": 0xFFFD761D, }, "Button.Label::rs": { "color": 0xFFFFFFFF, "font_size" : 16 }, "Button::clr": { "background_color": 0xEE7070, "background_gradient_color": 0xFFFAB26D, "border_color": 0xFFFD761D, }, "Button.Label::clr": { "color": 0xFFFFFFFF, "font_size" : 16 }, "Button::clr:hovered": { "background_color": 0xFFFF6E00, "background_gradient_color": 0xFFFFAE5A }, "Button::clr:pressed": { "background_color": 0xFFFAB26D, "background_gradient_color": 0xFFFF7E09 }, "Button::help": { "background_color": 0x6464c8, "background_gradient_color": 0xFFFAB26D, "border_color": 0xFFFD761D, }, "Button.Label::help": { "color": 0xFFFFFFFF, "font_size" : 16 }, "Button::help:hovered": { "background_color": 0xFFFF6E00, "background_gradient_color": 0xFFFFAE5A }, "Button::help:pressed": { "background_color": 0xFFFAB26D, "background_gradient_color": 0xFFFF7E09 }, }
2,488
Python
25.478723
48
0.546222
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/pillow_text.py
import glob from PIL import Image, ImageDraw, ImageFont, ImageDraw import io import asyncio import os import time import sys import os.path as path from pathlib import Path import omni.kit.pipapi from datetime import datetime, timedelta #Create and draw images in async contexxt async def draw_text_on_image_at_position_async ( input_image_path:str, output_image_path:str, textToDraw:str, costToDraw:str, x:int, y:int, fillColor:str, font:str, fontSize:int): await draw_text_on_image_at_position( input_image_path, output_image_path, textToDraw, costToDraw, x, y, fillColor, font, fontSize ) def is_file_older_than_x_days(file, days=1): file_time = path.getmtime(file) # Check against 24 hours return ((time.time() - file_time) / 3600 > 24*days) #Create a new image with text def draw_text_on_image_at_position( input_image_path:str, output_image_path:str, textToDraw:str, costToDraw:str, x:int, y:int, fillColor:str, font:str, fontSize:int): makeFile = False if not os.path.exists(input_image_path): print("No src file: " + str(input_image_path)) return if os.path.exists(output_image_path): if is_file_older_than_x_days(output_image_path, 30): makeFile = True else: makeFile = True if makeFile: print("Refreshing Image " + str(output_image_path) + " with text: " + textToDraw + " cst: " + costToDraw) #font = ImageFont.load(str(font)) font = ImageFont.truetype(str(font), fontSize, encoding="unic") print("Loading src file: " + str(input_image_path)) image = Image.open(input_image_path) image = image.rotate(270, expand=1) draw = ImageDraw.Draw(image) textW, textH = draw.textsize(textToDraw, font) # how big is our text costW, costH = draw.textsize(costToDraw, font) # how big is cost text if costToDraw != "": costToDraw = str(costToDraw) + " /month" draw.text((x,y-75), textToDraw, font_size=fontSize,anchor="ls", font=font, fill=fillColor) draw.text((x,y+75), costToDraw, font_size=(fontSize-50), anchor="ls", font=font, fill="red") else: draw.text((x, y-50), textToDraw, font_size=fontSize,anchor="ls", font=font, fill=fillColor) image = image.rotate(-270, expand=1) with open(output_image_path, 'wb') as out_file: image.save(out_file, 'PNG') #image.save(output_image_path) # def create_image_with_text(output_image_path:str, textToDraw:str, x:int, y:int, h:int, w:int, color:str, alignment:str, fillColor:str, fontPath:str, fontSize:int): # image = Image.new("RGB", (h, w), color) # draw = ImageDraw.Draw(image) # # Load font from URI # #font1 = "https://github.com/googlefonts/Arimo/raw/main/fonts/ttf/Arimo-Regular.ttf" # font1 = 'https://github.com/googlefonts/roboto/blob/main/src/hinted/Roboto-Black.ttf?raw=true' # font = load_font_from_uri(fontSize, font1) # #font = ImageFont.truetype(fontPath, layout_engine=ImageFont.LAYOUT_BASIC, size=fontSize) # draw.text((x, y), textToDraw, font=font, anchor="ls", fill=fillColor) # image.save(output_image_path) #angled text #https://stackoverflow.com/questions/245447/how-do-i-draw-text-at-an-angle-using-pythons-pil def draw_text_90_into(text: str, into, at): # Measure the text area font = ImageFont.truetype (r'C:\Windows\Fonts\Arial.ttf', 16) wi, hi = font.getsize (text) # Copy the relevant area from the source image img = into.crop ((at[0], at[1], at[0] + hi, at[1] + wi)) # Rotate it backwards img = img.rotate (270, expand = 1) # Print into the rotated area d = ImageDraw.Draw (img) d.text ((0, 0), text, font = font, fill = (0, 0, 0)) # Rotate it forward again img = img.rotate (90, expand = 1) # Insert it back into the source image # Note that we don't need a mask into.paste (img, 
at) if __name__ == "__main__": #create_image_with_text("temp\\output2.jpg", "Mmmuuuurrrrrrrrrr", 10.0,525,575,575,"white", "left", "black", "temp\\airstrike.ttf", 44) draw_text_on_image_at_position("temp\\tron_grid_test.png", "temp\\output_test.png", "defaultresourcegroup_ea","$299.00", 200,1800, "yellow", 110) #'https://github.com/googlefonts/roboto/blob/main/src/hinted/Roboto-Black.ttf?raw=true' # input_image_path:str, # output_image_path:str, # textToDraw:str, # costToDraw:str, # x:int, y:int, # fillColor:str, fontSize:int):
4,635
Python
30.753424
169
0.63754
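draw_text_on_image_at_position above rotates the source image, draws the label, and rotates it back. A minimal Pillow sketch of that pattern with assumed file and font paths (the original's draw.textsize call is deprecated in recent Pillow releases, so this sketch skips the measurement step):

# Minimal Pillow sketch of the rotate/draw/rotate-back approach; paths are assumptions.
from PIL import Image, ImageDraw, ImageFont

def label_image(src: str, dst: str, text: str, xy=(200, 1800), font_path: str = "arial.ttf", size: int = 110):
    font = ImageFont.truetype(font_path, size)
    image = Image.open(src)
    image = image.rotate(270, expand=1)       # rotate so the label runs along the plane edge
    ImageDraw.Draw(image).text(xy, text, font=font, fill="yellow", anchor="ls")
    image = image.rotate(-270, expand=1)      # rotate back to the original orientation
    image.save(dst, "PNG")

# label_image("tron_grid.png", "tron_grid_labelled.png", "defaultresourcegroup_ea")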
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/math_utils.py
# Calculate the size of the Plane to place Resource Group X's items on # using 2D spaces here, will locate all the items on a plane the size of the group # 1x, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, etc... what size do we need? import math from pxr import Gf __all__ = ["calcPlaneSizeForGroup"] from re import I from typing import List from .scatter_on_planes import scatterOnFixedPlane # Calculate the size of the Group Plane to create def calcPlaneSizeForGroup(scaleFactor:float, resourceCount: int): # 1-30 squared, return the square root, this is the size of the space needed for i in [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484,529,576,625,676,729, 784, 841,900]: if resourceCount > 0 and resourceCount <= i: return float(((math.sqrt(i)*100)*scaleFactor)+1) #FIGURES OUT WHERE TO PUT THE PRIMS ON A VARIABLE SIZED-PLANE def calculateGroupTransforms(self, scale:float, count:int ): #ex 400.0 -> 800 - 400 plane is 800x800 plane_size = (calcPlaneSizeForGroup(scaleFactor=scale, resourceCount=count)*2) plane_class = ((plane_size/100)/2) #distance of objects depending on grid size.. dist = plane_size / plane_class #Use NVIDIAs Scatter algo to position on varying sized planes transforms = scatterOnFixedPlane( count=[int(plane_class), int(plane_class), 1], # Distribute accross the plane class distance=[dist,dist,dist], scaleFactor=scale ) #there should be at least one transform if len(transforms) == 0: vec_id = 0 vec = {vec_id: Gf.Vec3f(0,0,0)} transforms[0] = vec return transforms
1,686
Python
32.739999
146
0.677343
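calcPlaneSizeForGroup above walks a hard-coded list of perfect squares; for counts up to 900 the same result can be written in closed form by rounding the count up to the next square. A standalone sketch:

# Closed-form equivalent of calcPlaneSizeForGroup: round the resource count up to the
# next perfect square and size the plane from its square root.
import math

def plane_size_for_group(scale_factor: float, resource_count: int) -> float:
    if resource_count <= 0:
        return 0.0  # the original returns None here; 0.0 is an assumption in this sketch
    side = math.ceil(math.sqrt(resource_count))   # e.g. 5 resources -> 3x3 grid
    return float(side * 100 * scale_factor + 1)

assert plane_size_for_group(1.0, 5) == 301.0   # matches the 9-slot (3x3) bucket
assert plane_size_for_group(1.0, 16) == 401.0  # matches the 16-slot (4x4) bucket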
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/scatter_complex.py
__all__ = ["distributePlanes"] from typing import List, Optional import random from pxr import Gf def distributePlanes( UpAxis: 'Z', count: List[int], distance: List[float], sizes: List[float], randomization: List[float], seed: Optional[int] = None, scaleFactor:float=1.0 ): #print("UpAxis = " + UpAxis) random.seed(seed) if(UpAxis == 'Z'): nUpPlane = count[0]*count[1] elif(UpAxis == 'X'): nUpPlane = count[1]*count[2] else:#(UpAxis == 'Y'): nUpPlane = count[2]*count[0] for i in range(len(sizes)): iPlane = i % nUpPlane if(UpAxis == 'Z'): ix = iPlane // count[1] iy = iPlane % count[1] iz = i // nUpPlane elif(UpAxis == 'X'): iy = iPlane // count[2] iz = iPlane % count[2] ix = i // nUpPlane else:#(UpAxis == 'Y'): iz = iPlane // count[0] ix = iPlane % count[0] iy = i // nUpPlane x = ix*((distance[0]+sizes[i])*scaleFactor) * randomization[0] y = iy*((distance[1]+sizes[i])*scaleFactor) * randomization[1] z = iz*((distance[2]+sizes[i])*scaleFactor) * randomization[2] yield(Gf.Vec3d(x,y,z))
1,255
Python
24.632653
70
0.517928
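distributePlanes above lays planes out row-major on the grid of the chosen up axis, spacing each step by the gap plus the plane's own size. A plain-Python sketch of the Z-up branch, with tuples standing in for Gf.Vec3d:

# Plain-Python sketch of the Z-up branch of distributePlanes above.
from typing import Iterator, List, Tuple

def distribute_planes_z_up(count: List[int], distance: List[float], sizes: List[float],
                           randomization: List[float], scale: float = 1.0) -> Iterator[Tuple[float, float, float]]:
    per_layer = count[0] * count[1]               # planes per Z layer
    for i, size in enumerate(sizes):
        cell = i % per_layer
        ix, iy, iz = cell // count[1], cell % count[1], i // per_layer
        yield (ix * (distance[0] + size) * scale * randomization[0],
               iy * (distance[1] + size) * scale * randomization[1],
               iz * (distance[2] + size) * scale * randomization[2])

# Four equally sized planes on a 2x2 grid with a 250-unit gap between them.
print(list(distribute_planes_z_up([2, 2, 1], [250.0] * 3, [401.0] * 4, [1.0] * 3)))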
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/style.py
from .constant import COLORS, LightColors, DarkColors, FontSize from omni import ui import carb.settings def get_ui_style(): settings = carb.settings.get_settings() style = settings.get_as_string("/persistent/app/window/uiStyle") if not style: style = "NvidiaDark" return style class DefaultWidgetStyle: LIGHT = { "Button": { "background_color": LightColors.Button, "border_radius": 2.0, "stack_direction": ui.Direction.LEFT_TO_RIGHT, }, "Button.Label": {"color": LightColors.Background}, "Button:hovered": {"background_color": LightColors.ButtonHovered}, "Button:pressed": {"background_color": LightColors.ButtonPressed}, "CollapsableFrame": { "color": COLORS.TRANSPARENT, "background_color": COLORS.TRANSPARENT, "secondary_color": COLORS.TRANSPARENT, }, "CollapsableFrame:hovered": {"secondary_color": COLORS.TRANSPARENT}, "CollapsableFrame:pressed": {"secondary_color": COLORS.TRANSPARENT}, "ComboBox": { "color": LightColors.Text, "background_color": LightColors.Background, "selected_color": LightColors.BackgroundSelected, "border_radius": 1, "padding_width": 0, "padding_height": 4, "secondary_padding": 8, }, "ComboBox:disabled": {"color": LightColors.TextDisabled}, "Field": {"background_color": LightColors.Background, "color": LightColors.Text, "border_radius": 2}, "Plot": {"background_color": LightColors.Background, "color": LightColors.TextSelected, "border_radius": 1}, "Triangle": {"background_color": LightColors.Background}, } DARK = { "Button": { "background_color": DarkColors.Button, "border_radius": 2.0, "stack_direction": ui.Direction.LEFT_TO_RIGHT, }, "Button.Label": {"color": DarkColors.Text}, "Button:hovered": {"background_color": DarkColors.ButtonHovered}, "Button:pressed": {"background_color": DarkColors.ButtonPressed}, "CollapsableFrame": { "color": COLORS.TRANSPARENT, "background_color": COLORS.TRANSPARENT, "secondary_color": COLORS.TRANSPARENT, }, "CollapsableFrame:hovered": {"secondary_color": COLORS.TRANSPARENT}, "CollapsableFrame:pressed": {"secondary_color": COLORS.TRANSPARENT}, "ComboBox": { "color": DarkColors.Text, "background_color": DarkColors.Background, "selected_color": DarkColors.BackgroundSelected, "border_radius": 1, }, "ComboBox:disabled": {"color": DarkColors.TextDisabled}, "Field": {"background_color": DarkColors.Background, "color": DarkColors.Text, "border_radius": 2}, "Plot": {"background_color": DarkColors.Background, "color": DarkColors.TextSelected, "border_radius": 1}, "Triangle": {"background_color": DarkColors.Background}, } @staticmethod def get_style(ui_style=None): if ui_style is None: ui_style = get_ui_style() if ui_style == "NvidiaDark": return DefaultWidgetStyle.DARK else: return DefaultWidgetStyle.LIGHT
3,323
Python
39.048192
116
0.607884
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/omni_utils.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # __all__ = ["get_selection", "duplicate_prims", "create_prims, create_shaders"] from re import I from typing import List import omni.usd import asyncio import omni.kit.commands from pxr import Sdf, Gf, Usd, UsdGeom from .prim_utils import create_plane def get_selection() -> List[str]: """Get the list of currently selected prims""" return omni.usd.get_context().get_selection().get_selected_prim_paths() def duplicate_prims(transforms: List = [], prim_names: List[str] = [], target_path: str = "", mode: str = "Copy"): """ Returns generator with pairs containing transform matrices and ids to arrange multiple objects. ### Arguments: `transforms: List` Pairs containing transform matrices and ids to apply to new objects `prim_names: List[str]` Prims to duplicate `target_path: str` The parent for the new prims `mode: str` "Reference": Create a reference of the given prim path "Copy": Create a copy of the given prim path "PointInstancer": Create a PointInstancer """ if mode == "PointInstancer": omni.kit.commands.execute( "ScatterCreatePointInstancer", path_to=target_path, transforms=transforms, prim_names=prim_names, ) return usd_context = omni.usd.get_context() # Call commands in a single undo group. So the user will undo everything # with a single press of ctrl-z with omni.kit.undo.group(): # Create a group omni.kit.commands.execute("CreatePrim", prim_path=target_path, prim_type="Scope") for i, matrix in enumerate(transforms): id = matrix[1] matrix = matrix[0] path_from = Sdf.Path(prim_names[id]) path_to = Sdf.Path(target_path).AppendChild(f"{path_from.name}{i}") # Create a new prim if mode == "Copy": omni.kit.commands.execute("CopyPrims", paths_from=[path_from.pathString], paths_to=[path_to.pathString]) elif mode == "Reference": omni.kit.commands.execute( "CreateReference", usd_context=usd_context, prim_path=path_from, path_to=path_to, asset_path="" ) else: continue # Move omni.kit.commands.execute("TransformPrim", path=path_to, new_transform_matrix=matrix) def create_prims(up_axis:str, plane_size:List[float], transforms: List = [], prim_names: List[str] = [], parent_path:str = ""): """ Returns generator with pairs containing transform matrices and ids to arrange multiple objects. ### Arguments: `transforms: List` Pairs containing transform matrices and ids to apply to new objects `prim_names: List[str]` Prims to create `target_paths: List[str]` The paths for the new prims """ usd_context = omni.usd.get_context() stage_ref = usd_context.get_stage() # Call commands in a single undo group. 
So the user will undo everything # with a single press of ctrl-z #with omni.kit.undo.group(): #print("Prim count: " + str(len(prim_names))) # Create a group #omni.kit.commands.execute("CreatePrim", prim_path=parent_path, prim_type="Scope") i=0 for matrix in enumerate(transforms): if (i >= len(prim_names)): continue path = Sdf.Path(parent_path).AppendPath(prim_names[i]["group"]) print(str(i) + " adding plane:" + str(path) + " " + str(plane_size[i]) + " @ " + str(matrix[1])) if prim_names[i]["group"] == "observation_deck": matrix[1][0] = matrix[1][0] + 500 matrix[1][1] = matrix[1][1] + 500 matrix[1][2] = matrix[1][2] + 500 omni.kit.commands.execute('AddGroundPlaneCommand', stage=stage_ref, planePath=str(path), #'/RGrp/Test_Positioning' axis='Z', size=plane_size[i], position=matrix[1], color=Gf.Vec3f(0,0,0)) i=i+1 def get_selected_prims(self): """ Get the currently selected prims in the scene """ context = omni.usd.get_context() stage = context.get_stage() prims = [stage.GetPrimAtPath(m) for m in context.get_selection().get_selected_prim_paths()] return prims async def create_shaders(base_path:str, prim_name:str ): prim_path = Sdf.Path(base_path) prim_path = prim_path.AppendPath("CollisionMesh") #Select the Collision Mesh omni.kit.commands.execute('SelectPrims', old_selected_paths=[''], new_selected_paths=[str(prim_path)], expand_in_stage=True) #print("Creating Shader: " + str(prim_path)) #Create a Shader for the Mesh omni.kit.commands.execute('CreateAndBindMdlMaterialFromLibrary', mdl_name='OmniPBR.mdl', mtl_name='OmniPBR', prim_name=prim_name, mtl_created_list=None, bind_selected_prims=True) await omni.kit.app.get_app().next_update_async()
5,525
Python
32.490909
127
0.618643
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/data_manager.py
# This class manages both the offline data and online data from typing import Dict from .Singleton import Singleton from .csv_data_manager import CSVDataManager from .azure_data_manager_stub import AzureDataManager #Azure API disabled in this version, due to: from .data_store import DataStore from .prim_utils import cleanup_prim_path, draw_image from .azure_resource_map import shape_usda_name from .pillow_text import draw_text_on_image_at_position_async, draw_text_on_image_at_position from pathlib import Path from pxr import Sdf from .prim_utils import get_font_size_from_length import omni.kit.notification_manager as nm import omni import asyncio import logging import shutil import locale import carb # User either connects to Azure with connection info # OR User can import data from data files # depending on the mode, this class should return the same data # it is a DataManager type resource # User clicks Connect, Or Load, Goal is the same, load data from azure or files # and give the user some basic info to show the connection / import worked. # now connected, user can load different sets of resources and view then in different ways. ASYNC_ENABLED = True CURRENT_PATH = Path(__file__).parent DATA_PATH = CURRENT_PATH.joinpath("temp") RES_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data\\resources") IMPORTS_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data\\import") @Singleton class DataManager: def __init__(self): self._callbacks = [] logging.getLogger("asyncio").setLevel(logging.WARNING) carb.log_info("DataManager Created.") self._dataStore = DataStore.instance() self._offlineDataManager = CSVDataManager() self._onlineDataManager = AzureDataManager() self._dataStore.Load_Config_Data() self.refresh_data() #shut it down... def destroy(self): carb.log_info("DataManager Destroyed.") self._callbacks = [] self._offlineDataManager = None self._onlineDataManager = None self._dataStore = None #this seems to cause problems #add a callback for model changed def add_model_changed_callback(self, func): self._callbacks.append(func) #Invoke the callbacks that want to know when the data changes def _model_changed(self): for c in self._callbacks: c() #Load data from file def load_csv_files(self): self._dataStore._groups.clear() self._dataStore._resources.clear() self._lcl_sizes = [] self._lcl_groups = [] self._lcl_resources = [] self._dataStore._source_of_data = "OfflineData" self._dataStore.Save_Config_Data() #Load data from Cloud API self._offlineDataManager.loadFiles() #Aggregate the info, wait for it if len(self._dataStore._groups) >0: asyncio.ensure_future(self.process_data()) #Load data from Azure API def load_from_api(self): self._dataStore._groups.clear() self._dataStore._resources.clear() self._lcl_sizes = [] self._lcl_groups = [] self._lcl_resources = [] self._dataStore._source_of_data = "LiveAzureAPI" self._dataStore.Save_Config_Data() #Load the data and process it if self._onlineDataManager.connect(): self._onlineDataManager.load_data() #wait for data to finish loading if len(self._dataStore._groups) >0: asyncio.ensure_future(self.process_data()) def wipe_data(self): self._dataStore.wipe_data() self._model_changed() def refresh_data(self): if self._dataStore: if self._dataStore._source_of_data =="OfflineData": self.load_csv_files() carb.log_info("CSV Data Refreshed.") elif self._dataStore._source_of_data == "LiveAzureAPI": self.load_from_api() carb.log_info("Live Data Refreshed.") else: carb.log_info("Load some data!") #Load the "All resources (Shapes) set" #This 
sample contains 1 resource per group def load_sample_resources(self): if self._dataStore: self._dataStore.wipe_data() self._dataStore._source_of_data = "SampleFiles" src_filel = IMPORTS_PATH.joinpath("TestShapes_RG.csv") src_file2 = IMPORTS_PATH.joinpath("TestShapes_all.csv") self.load_and_process_manual(src_filel, src_file2) #Load the "Small Company sample" def load_small_company(self): if self._dataStore: self._dataStore.wipe_data() self._dataStore._source_of_data = "SampleFiles" src_filel = IMPORTS_PATH.joinpath("SmallCompany_RG.csv") src_file2 = IMPORTS_PATH.joinpath("SmallCompany_all.csv") self.load_and_process_manual(src_filel, src_file2) #Load the "Large Company sample" def load_large_company(self): if self._dataStore: self._dataStore.wipe_data() self._dataStore._source_of_data = "SampleFiles" src_filel = IMPORTS_PATH.joinpath("LargeCompany_RG.csv") src_file2 = IMPORTS_PATH.joinpath("LargeCompany_all.csv") self.load_and_process_manual(src_filel, src_file2) #load the files async def load_and_process_manual(self, grpFile, rgFIle ): #load the files self._offlineDataManager.loadFilesManual(grpFile, rgFIle) #Aggregate the info if len(self._dataStore._groups) >0: asyncio.ensure_future(self.process_data()) #Aggregate subscription, resources counts to DataManager Dictionaries async def process_data(self): carb.log_info("Processing Data...") #For every resrouce... for key in self._dataStore._resources: obj = self._dataStore._resources[key] #yield control await asyncio.sleep(0) ### AGGREGATE COUNTS self.AggregateCountsAsync(obj) ### AGGREGATE COSTS self.AggregateCostsAsync(obj) ### MAP RESOURCES TO AGGREGATES self.MapResourcesToGroupsAsync(obj) #Pre-create images for the groups carb.log_info("Creating images..") await self.CreateImagesForGroups() carb.log_info("Creating images complete..") #let everyone know, stuff changed... 
self._model_changed() #output aggregation results to console carb.log_info("Data processing complete..") carb.log_info(self._dataStore._source_of_data + " data refreshed.") carb.log_info(str(len(self._dataStore._resources)) + " Resources loaded from " + self._dataStore._source_of_data) carb.log_info(str(len(self._dataStore._groups)) + " Groups loaded from " + self._dataStore._source_of_data) #Create Images for all the maps async def CreateImagesForGroups(self): carb.log_info("Processing images async.") #go through all the maps and create images #this will save a ton of time later if self._dataStore._bgl_file_path is None: return if self._dataStore._bgm_file_path is None: return if self._dataStore._bgh_file_path is None: return src_filel = RES_PATH.joinpath(self._dataStore._bgl_file_path) src_filem = RES_PATH.joinpath(self._dataStore._bgm_file_path) src_fileh = RES_PATH.joinpath(self._dataStore._bgh_file_path) src_image = src_filel #SUBSCRIPTIONS #We need to create images for each group for rec in self._dataStore._map_subscription: recText = rec #Name of subscription #Let the Ui breathe ;) #TODO async #await omni.kit.app.get_app().next_update_async() output_file = DATA_PATH.joinpath(recText + ".png") cost_output_file = DATA_PATH.joinpath(recText + "-cost.png") textToDraw = recText costToDraw ="" #We dont care here if the user wants costs or not, we are pre-making images try: locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' ) rawCost = float(self._dataStore._subscription_cost[recText]) costToDraw = locale.currency(self._dataStore._subscription_cost[recText]) carb.log_info ("RawCost: " + recText + " $" + str(rawCost)) carb.log_info ("Cost: " + recText + " $" + str(costToDraw)) if rawCost < 500: src_image = src_filel if rawCost > 500 and rawCost < 1500: src_image = src_filem if rawCost > 1500: src_image = src_fileh except: costToDraw="" #todo change image based on score draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="") draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw) #LOCATIONS #We need to create images for each group for rec in self._dataStore._map_location: recText = rec #Let the Ui breathe ;) #await omni.kit.app.get_app().next_update_async() temp_file = recText + ".png" output_file = DATA_PATH.joinpath(temp_file) cost_output_file = DATA_PATH.joinpath(recText + "-cost.png") textToDraw = recText costToDraw ="" try: locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' ) rawCost = float(self._dataStore._location_cost[recText]) costToDraw = locale.currency(self._dataStore._location_cost[recText]) carb.log_info ("RawCost: " + recText + " $" + str(rawCost)) carb.log_info ("Cost: " + recText + " $" + str(costToDraw)) if rawCost < 500: src_image = src_filel if rawCost > 500 and rawCost < 1500: src_image = src_filem if rawCost > 1500: src_image = src_fileh except: costToDraw="" draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="") draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw) #RESOURCE GROUPS #We need to create images for each group for rec in self._dataStore._map_group: recText = rec #Let the Ui breathe ;) #await omni.kit.app.get_app().next_update_async() output_file = DATA_PATH.joinpath(recText + ".png") cost_output_file = DATA_PATH.joinpath(recText + "-cost.png") textToDraw = recText costToDraw ="" try: locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' ) rawCost = 
float(self._dataStore._group_cost[rec]) costToDraw = locale.currency(self._dataStore._group_cost[recText]) carb.log_info ("RawCost: " + recText + " $" + str(rawCost)) carb.log_info ("Cost: " + recText + " $" + str(costToDraw)) if rawCost < 500: src_image = src_filel if rawCost > 500 and rawCost < 1500: src_image = src_filem if rawCost > 1500: src_image = src_fileh except: costToDraw="" draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="") draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw) #TYPES #We need to create images for each group for rec in self._dataStore._map_type: recText = rec #Let the Ui breathe ;) #await omni.kit.app.get_app().next_update_async() output_file = DATA_PATH.joinpath(recText + ".png") cost_output_file = DATA_PATH.joinpath(recText + "-cost.png") textToDraw = recText costToDraw ="" try: locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' ) rawCost = float(self._dataStore._type_cost[recText]) costToDraw = locale.currency(self._dataStore._type_cost[recText]) carb.log_info ("RawCost: " + recText + " $" + str(rawCost)) carb.log_info ("Cost: " + recText + " $" + str(costToDraw)) if rawCost < 500: src_image = src_filel if rawCost > 500 and rawCost < 1500: src_image = src_filem if rawCost > 1500: src_image = src_fileh except: costToDraw="" draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="") draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw) #TAGS #We need to create images for each group for rec in self._dataStore._map_tag: recText = rec #Let the Ui breathe ;) #await omni.kit.app.get_app().next_update_async() output_file = DATA_PATH.joinpath(recText + ".png") cost_output_file = DATA_PATH.joinpath(recText + "-cost.png") textToDraw = recText costToDraw ="" try: locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' ) rawCost = float(self._dataStore._tag_cost[recText]) costToDraw = locale.currency(self._dataStore._tag_cost[recText]) carb.log_info ("RawCost: " + recText + " $" + str(rawCost)) carb.log_info ("Cost: " + recText + " $" + str(costToDraw)) if rawCost < 500: src_image = src_filel if rawCost > 500 and rawCost < 1500: src_image = src_filem if rawCost > 1500: src_image = src_fileh except: costToDraw="" draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="") draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw) carb.log_info("Processing images complete..") #Calculate the low, min, max, mean costs and score each group according to its peers def ScoreCosts(self): pass #Async context def AggregateCostsAsync(self, obj): ### AGGREGATE COSTS #Cost per Sub subKey = cleanup_prim_path(self, obj["subscription"]) if subKey not in self._dataStore._subscription_cost.keys(): self._dataStore._subscription_cost[subKey] = float(obj["lmcost"]) else: self._dataStore._subscription_cost[subKey] = float(self._dataStore._subscription_cost[subKey]) + float(obj["lmcost"]) #Cost per Location locKey = cleanup_prim_path(self, obj["location"]) if locKey not in self._dataStore._location_cost.keys(): self._dataStore._location_cost[locKey] = float(obj["lmcost"]) else: self._dataStore._location_cost[locKey] = float(self._dataStore._location_cost[locKey]) + float(obj["lmcost"]) #Cost per Type typeKey = cleanup_prim_path(self, obj["type"]) if typeKey not in self._dataStore._type_cost.keys(): 
self._dataStore._type_cost[typeKey] = float(obj["lmcost"]) else: self._dataStore._type_cost[typeKey] = float(self._dataStore._type_cost[typeKey]) + float(obj["lmcost"]) #Cost per Group grpKey = cleanup_prim_path(self, obj["group"]) if grpKey not in self._dataStore._group_cost.keys(): self._dataStore._group_cost[grpKey] = float(obj["lmcost"]) else: self._dataStore._group_cost[grpKey] = float(self._dataStore._group_cost[grpKey]) + float(obj["lmcost"]) #Async Context def AggregateCountsAsync(self, obj): ### AGGREGATE COUNTS #Count per Sub subKey = cleanup_prim_path(self, obj["subscription"]) if subKey not in self._dataStore._subscription_count.keys(): self._dataStore._subscription_count[subKey] = 1 else: self._dataStore._subscription_count[subKey] = self._dataStore._subscription_count[subKey] + 1 #Count per Location locKey = cleanup_prim_path(self, obj["location"]) if locKey not in self._dataStore._location_count.keys(): self._dataStore._location_count[locKey] = 1 else: self._dataStore._location_count[locKey] = self._dataStore._location_count[locKey] + 1 #Count per Type typeKey = cleanup_prim_path(self, obj["type"]) if typeKey not in self._dataStore._type_count.keys(): self._dataStore._type_count[typeKey] = 1 else: self._dataStore._type_count[typeKey] = self._dataStore._type_count[typeKey] + 1 #Count per Group grpKey = cleanup_prim_path(self, obj["group"]) if grpKey not in self._dataStore._group_count.keys(): self._dataStore._group_count[grpKey] = 1 else: self._dataStore._group_count[grpKey] = self._dataStore._group_count[grpKey] + 1 #Given a resource, Map it to all the groups it belongs to. def MapResourcesToGroupsAsync(self, obj): #Get the mapped shape and figure out the prim path for the map # Set a default shape_to_render = "omniverse://localhost/Resources/3dIcons/scene.usd" #NAME,TYPE,RESOURCE GROUP,LOCATION,SUBSCRIPTION, LMCOST try: resName = obj["name"] typeName = cleanup_prim_path(self, obj["type"]) #needs to be clean, used to map to shapes group = obj["group"] location = obj["location"] sub = obj["subscription"] cost =obj["lmcost"] shape_to_render = shape_usda_name[typeName] except: carb.log_info("Error getting priom values - " + resName) # SUBSCRIPTION MAP self.map_objects(resName, typeName, group, location, sub, cost, "/Subs" ,shape_to_render, self._dataStore._map_subscription, obj, "subscription") # GROUP MAP self.map_objects(resName, typeName, group, location, sub, cost, "/RGrps", shape_to_render, self._dataStore._map_group, obj, "group") # TYPE MAP self.map_objects(resName, typeName, group, location, sub, cost, "/Types", shape_to_render, self._dataStore._map_type, obj, "type") # LOCATION MAP self.map_objects(resName, typeName, group, location, sub, cost, "/Locs", shape_to_render, self._dataStore._map_location, obj, "location") #TODO TAGMAP #self.map_objects(typeName, "/Tag", shape_to_render, self._dataStore._tag_map, obj, "tag") #Maps objects to create to each aggregate def map_objects(self, resName, typeName,grp, loc, sub, cost, root, shape, map, obj, field:str): cleaned_group_name = cleanup_prim_path(self, Name=obj[field]) carb.log_info(cleaned_group_name) map_obj = {"name": resName, "type":typeName, "shape":shape, "location":loc, "subscription":sub, "group":grp, "cost":cost } if cleaned_group_name not in map.keys(): #new map! 
map[cleaned_group_name] = [map_obj] else: #get the map for this group, add this item mapObj = map[cleaned_group_name] mapObj.append(map_obj) #passthrough to csv manager def select_file(self, fileType: str): self._offlineDataManager.select_file(fileType=fileType) def clicked_ok(self): pass def sendNotify(self, message:str, status:nm.NotificationStatus): # https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager# import omni.kit.notification_manager as nm ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok) nm.post_notification( message, hide_after_timeout=True, duration=5, status=status, button_infos=[ok_button] ) #-- SINGLETON SUPPORT def instance(self): """ Returns the singleton instance. Upon its first call, it creates a new instance of the decorated class and calls its `__init__` method. On all subsequent calls, the already created instance is returned. """ try: return self._instance except AttributeError: self._instance = self._decorated() return self._instance def __call__(self): raise TypeError('Singletons must be accessed through `instance()`.') def __instancecheck__(self, inst): return isinstance(inst, self._decorated)
21,491
Python
37.174067
159
0.593551
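AggregateCountsAsync and AggregateCostsAsync above accumulate per-subscription, per-location, per-type, and per-group totals into separate dictionaries. A compact standalone sketch of the same aggregation over plain resource dicts, using one combined dictionary keyed by axis instead of the DataStore:

# Standalone sketch of the per-key count/cost aggregation performed by the data manager.
from collections import defaultdict
from typing import Dict, Iterable, Tuple

def aggregate(resources: Iterable[dict]) -> Tuple[Dict[str, int], Dict[str, float]]:
    counts: Dict[str, int] = defaultdict(int)
    costs: Dict[str, float] = defaultdict(float)
    for res in resources:
        for field in ("subscription", "location", "type", "group"):
            key = f"{field}:{res[field]}"       # one bucket per aggregation axis
            counts[key] += 1
            costs[key] += float(res["lmcost"])
    return dict(counts), dict(costs)

sample = [
    {"subscription": "Prod", "location": "eastus", "type": "vm", "group": "rg1", "lmcost": "12.5"},
    {"subscription": "Prod", "location": "eastus", "type": "disk", "group": "rg1", "lmcost": "2.5"},
]
counts, costs = aggregate(sample)
assert counts["group:rg1"] == 2 and costs["subscription:Prod"] == 15.0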
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/stage_position.py
__all__ = ["scatterWithPlaneSize"] from typing import List, Optional import random from pxr import Gf def scatterWithPlaneSize( count: List[int], distance: List[float], sizes: List[float], randomization: List[float], id_count: int = 1, seed: Optional[int] = None, scaleFactor:float=1.0 ): """ Returns generator with pairs containing transform matrices and ids to arrange multiple objects. ### Arguments: `count: List[int]`ce1dcf85-c041-4bb9-8275-4b96e70252a2 Number of matrices to generage per axis `distance: List[float]` The distance between objects per axis `randomization: List[float]` Random distance per axis `id_count: int` Count of differrent id `seed: int` If seed is omitted or None, the current system time is used. If seed is an int, it is used directly. """ print("Generating " + str(id_count) + " postions: " + str(count[0]) + "|" + str(count[1]) + "|" + str(count[2])) for i in range(id_count): if (sizes[i]>250): x = (i - 0.5 * (count[0] - 1)) * (distance[0]*scaleFactor) + (sizes[i]*2) else: x = (i - 0.5 * (count[0] - 1)) * (distance[0]*scaleFactor) + (sizes[i]*2) for j in range(count[1]): if (sizes[i]>250): y = (j - 0.5 * (count[1] - 1)) * (distance[1]*scaleFactor) + (sizes[i]*2) else: y = (j - 0.5 * (count[1] - 1)) * (distance[1]*scaleFactor) + (sizes[i]*2) for k in range(count[2]): if (sizes[i]>250): z = (k - 0.5 * (count[2] - 1)) * (distance[2]*scaleFactor) + (sizes[i]*2) else: z = (k - 0.5 * (count[2] - 1)) * (distance[2]*scaleFactor) + (sizes[i]*2) result = Gf.Vec3d(x,y,z) yield (result) def position_resource_on_target( planeSize: float, resourceNumber: int, ): # pass if (planeSize == 100.0): pass if (planeSize == 200.0): pass if (planeSize == 300.0): pass if (planeSize == 400.0): pass if (planeSize == 500.0): pass if (planeSize == 600.0): pass
2,305
Python
24.340659
116
0.509328
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/data_store.py
__all__ = ["Save_Config_Data", "Load_Config_Data"] import carb from .Singleton import Singleton import omni.ui as ui from .combo_box_model import ComboBoxModel from pathlib import Path import os CURRENT_PATH = Path(__file__).parent DATA_PATH = CURRENT_PATH.joinpath("temp") RES_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data\\resources") @Singleton class DataStore(): def __init__(self): print("DataStore initialized") #Azure Resoruce Groups #NAME,SUBSCRIPTION,LOCATION self._groups = {} #All the reosurces #NAME,TYPE,RESOURCE GROUP,LOCATION,SUBSCRIPTION, LMCOST self._resources = {} #aggregated data (counts) self._aad_count = {} self._subscription_count = {} self._location_count = {} self._group_count = {} self._type_count = {} self._tag_count = {} #aggregated data (costs) self._aad_cost = {} self._subscription_cost = {} self._location_cost = {} self._group_cost = {} self._type_cost = {} self._tag_cost = {} #mapped resources (indexes) self._map_aad = {} self._map_subscription = {} self._map_location = {} self._map_group = {} self._map_type = {} self._map_tag = {} #track where the data last came from (state) self._source_of_data = "" self._use_symmetric_planes = False self._use_packing_algo = True self._show_info_widgets = True self._last_view_type = "ByGroup" # ByGroup, ByLocation, ByType, BySub, ByTag self._scale_model = 1.0 #temporary arrays #Calc Plane sizes based on items in group self._lcl_sizes = [] #Plane sizes determined by resource counts self._lcl_groups = [] #Group data for creating planes self._lcl_resources = [] #Resources to show on stage #Variables for files to import (UI settings) self._rg_csv_file_path = "" self._rg_csv_field_model = ui.SimpleStringModel() self._rs_csv_file_path = "" self._rs_csv_field_model = ui.SimpleStringModel() self._bgl_file_path = "" self._bgl_field_model = ui.SimpleStringModel() self._bgm_file_path = "" self._bgm_field_model = ui.SimpleStringModel() self._bgh_file_path = "" self._bgh_field_model = ui.SimpleStringModel() #azure connection info self._azure_tenant_id = "" self._azure_tenant_id_model =ui.SimpleStringModel() self._azure_client_id = "" self._azure_client_id_model = ui.SimpleStringModel() self._azure_client_secret = "" self._azure_client_secret_model = ui.SimpleStringModel() self._azure_subscription_id = "" self._azure_subscription_id_model = ui.SimpleStringModel() #composition options (UI settings) self._symmetric_planes_model = ui.SimpleBoolModel(False) self._packing_algo_model = ui.SimpleBoolModel(True) self._show_info_widgets_model = ui.SimpleBoolModel(True) self._primary_axis_model = ComboBoxModel("Z", "X", "Y") # track which Axis is up self._shape_up_axis_model = ComboBoxModel("Z", "X", "Y") # track which Axis is up for the shape placement self._composition_scale_model = ui.SimpleFloatModel() self._options_count_models = [ui.SimpleIntModel(), ui.SimpleIntModel(), ui.SimpleIntModel()] self._options_dist_models = [ui.SimpleFloatModel(), ui.SimpleFloatModel(), ui.SimpleFloatModel()] self._options_random_models = [ui.SimpleFloatModel(), ui.SimpleFloatModel(), ui.SimpleFloatModel()] self._composition_scale_model.as_float = 1.0 self._options_count_models[0].as_int = 10 self._options_count_models[1].as_int = 10 self._options_count_models[2].as_int = 1 self._options_dist_models[0].as_float = 250 self._options_dist_models[1].as_float = 250 self._options_dist_models[2].as_float = 250 self._options_random_models[0].as_float = 1.0 self._options_random_models[1].as_float = 1.0 self._options_random_models[2].as_float = 1.0 
self.Load_Config_Data() def wipe_data(self): self._groups.clear() self._resources.clear() self._subscription_count = {} self._location_count = {} self._group_count = {} self._type_count = {} self._tag_count = {} self._subscription_cost = {} self._location_cost = {} self._group_cost = {} self._type_cost = {} self._tag_cost = {} self._map_aad = {} self._map_subscription = {} self._map_location = {} self._map_group = {} self._map_type = {} self._map_tag = {} self._lcl_sizes = [] self._lcl_groups = [] self._lcl_resources = [] carb.log_info("Data Cleared.") def Save_Config_Data(self): settings = carb.settings.get_settings() if self._rg_csv_file_path != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/rg_csv_file_path", self._rg_csv_file_path) if self._rs_csv_file_path != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/rs_csv_file_path", self._rs_csv_file_path) if self._azure_tenant_id != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/azure_tenant_id", self._azure_tenant_id) if self._azure_client_id != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/azure_client_id", self._azure_client_id) if self._azure_subscription_id != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/azure_subscription_id", self._azure_subscription_id) if self._source_of_data != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/last_data_source", self._source_of_data) if self._bgl_file_path != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/bgl_file_path", self._bgl_file_path) if self._bgm_file_path != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/bgm_file_path", self._bgm_file_path) if self._bgh_file_path != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/bgh_file_path", self._bgh_file_path) if self._last_view_type != "": settings.set("/persistent/exts/meta.cloud.explorer.azure/last_view_type", self._last_view_type) if self._options_count_models[0].as_int >0: settings.set("/persistent/exts/meta.cloud.explorer.azure/x_group_count", self._options_count_models[0].as_int) if self._options_count_models[1].as_int >0: settings.set("/persistent/exts/meta.cloud.explorer.azure/y_group_count", self._options_count_models[1].as_int) if self._options_count_models[2].as_int >= 0: settings.set("/persistent/exts/meta.cloud.explorer.azure/z_group_count", self._options_count_models[2].as_int) if self._options_dist_models[0].as_float >= 0: settings.set("/persistent/exts/meta.cloud.explorer.azure/x_dist_count", self._options_dist_models[0].as_float) if self._options_dist_models[1].as_float >= 0: settings.set("/persistent/exts/meta.cloud.explorer.azure/y_dist_count", self._options_dist_models[1].as_float) if self._options_dist_models[2].as_float >= 0: settings.set("/persistent/exts/meta.cloud.explorer.azure/z_dist_count", self._options_dist_models[2].as_float) if self._options_random_models[0].as_float >= 0: settings.set("/persistent/exts/meta.cloud.explorer.azure/x_random_count", self._options_random_models[0].as_float) if self._options_random_models[1].as_float >= 0: settings.set("/persistent/exts/meta.cloud.explorer.azure/y_random_count", self._options_random_models[1].as_float) if self._options_random_models[2].as_float >= 0: settings.set("/persistent/exts/meta.cloud.explorer.azure/z_random_count", self._options_random_models[2].as_float) settings.set("/persistent/exts/meta.cloud.explorer.azure/show_info_widgets", self._show_info_widgets) #Load Saved config data def Load_Config_Data(self): settings = 
carb.settings.get_settings() self._rg_csv_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/rg_csv_file_path") self._rs_csv_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/rs_csv_file_path") self._azure_tenant_id = settings.get("/persistent/exts/meta.cloud.explorer.azure/azure_tenant_id") self._azure_client_id = settings.get("/persistent/exts/meta.cloud.explorer.azure/azure_client_id") self._azure_subscription_id = settings.get("/persistent/exts/meta.cloud.explorer.azure/azure_subscription_id") try: self._azure_client_secret = os.getenv('MCE_CLIENT_SECRET') except: self._azure_client_secret= "" self._source_of_data = settings.get("/persistent/exts/meta.cloud.explorer.azure/last_data_source") self._bgl_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/bgl_file_path") self._bgm_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/bgm_file_path") self._bgh_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/bgh_file_path") self._last_view_type= settings.get("/persistent/exts/meta.cloud.explorer.azure/last_view_type") self._show_info_widgets= settings.get("/persistent/exts/meta.cloud.explorer.azure/show_info_widgets") try: self._options_count_models[0].set_value(int(settings.get("/persistent/exts/meta.cloud.explorer.azure/x_group_count"))) self._options_count_models[1].set_value(int(settings.get("/persistent/exts/meta.cloud.explorer.azure/y_group_count"))) self._options_count_models[2].set_value(int(settings.get("/persistent/exts/meta.cloud.explorer.azure/z_group_count"))) self._options_dist_models[0].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/x_dist_count"))) self._options_dist_models[1].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/y_dist_count"))) self._options_dist_models[2].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/z_dist_count"))) self._options_random_models[0].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/x_random_count"))) self._options_random_models[1].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/y_random_count"))) self._options_random_models[2].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/z_random_count"))) except: #set defualts self._last_view_type = "ByGroup" self._composition_scale_model.set_value(1.0) self._options_count_models[0].set_value(10) self._options_count_models[1].set_value(10) self._options_count_models[2].set_value(1) self._options_dist_models[0].set_value(250) self._options_dist_models[1].set_value(250) self._options_dist_models[2].set_value(250) self._options_random_models[0].set_value(1.0) self._options_random_models[1].set_value(1.0) self._options_random_models[2].set_value(1) #set defaults if self._bgl_file_path is None: self._bgl_file_path = RES_PATH.joinpath("grid_green.png") self._bgm_file_path = RES_PATH.joinpath("grid_blue.png") self._bgh_file_path = RES_PATH.joinpath("grid_red.png") self.Save_Config_Data() #-- SINGLETON SUPPORT def instance(self): """ Returns the singleton instance. Upon its first call, it creates a new instance of the decorated class and calls its `__init__` method. On all subsequent calls, the already created instance is returned. 
""" try: return self._instance except AttributeError: self._instance = self._decorated() return self._instance def __call__(self): raise TypeError('Singletons must be accessed through `instance()`.') def __instancecheck__(self, inst): return isinstance(inst, self._decorated)
12,674
Python
47.563218
150
0.621272
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/group_aad.py
from .group_base import GroupBase
from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf
import locale


class AADGrpView(GroupBase):
    def __init__(self, viewPath: str, scale: float, upAxis: str, shapeUpAxis: str):
        self._scale = scale
        self._upAxis = upAxis
        self._shapeUpAxis = shapeUpAxis
        self.view_path = viewPath
        super().__init__()

    def calcGroupPlaneSizes(self):
        pass

    def calulateCosts(self):
        for g in self._dataStore._lcl_groups:
            # Get the cost by resource group
            locale.setlocale(locale.LC_ALL, 'en_CA.UTF-8')
            try:
                self._cost = str(locale.currency(self._dataStore._aad_cost[g]))
            except:
                self._cost = ""  # blank not 0, blank means don't show it at all

    def prepResources(self):
        pass  # Requires subclass implementation
879
Python
28.333332
83
0.588168
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/extension.py
import sys
import carb
import omni.ext
import asyncio
from functools import partial
import omni.ext
import omni.kit.ui
import omni.ui as ui
import omni.kit.pipapi

# Requires Code 2022.1.2+ - Blocked by typing_extensions incompatibility
from omni.kit.viewport.utility import get_active_viewport_window
from .views import MainView, WINDOW_NAME
from .viewport_scene import ViewportScene
from .object_info_model import ObjectInfoModel
from .widget_info_model import WidgetInfoModel

## AZURE API DISABLED IN 2022.1.3, due to PIP library problem with typing_extensions library.
#omni.kit.pipapi.install("azure-identity", module="azure-identity", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
#omni.kit.pipapi.install("azure-mgmt-resource", module="azure-mgmt-resource", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
#omni.kit.pipapi.install("pandas", module="pandas", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
#sys.path.append("D:/python37/lib/site-packages")
#print(sys.modules.keys())
#from azure.mgmt.resource import ResourceManagementClient
#from azure.identity import AzureCliCredential


# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`)
# will be instantiated when extension gets enabled and `on_startup(ext_id)` will be called.
# Later when extension gets disabled on_shutdown() is called
class MetaCloudExplorerAzure(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    MENU_PATH = f"Window/{WINDOW_NAME}"

    def on_startup(self, ext_id):
        carb.log_info("[meta.cloud.explorer.azure.extension] MetaCloudExplorer startup")
        self._ext_id = ext_id
        self._menu_path = f"Window/{WINDOW_NAME}"
        self._window = None

        def on_menu_click(menu, toggled):
            """Handles showing and hiding the window from the 'Windows' menu."""
            if toggled:
                # Get the active Viewport (which at startup is the default Viewport)
                self._viewport_window = get_active_viewport_window()

                # Issue an error if there is no Viewport
                if not self._viewport_window:
                    carb.log_error(f"No Viewport Window to add {self._ext_id} scene to")
                    return

                # Build out the scene
                objModel = ObjectInfoModel()
                widModel = WidgetInfoModel()
                self._viewport_scene = ViewportScene(viewport_window=self._viewport_window, ext_id=self._ext_id, widgetModel=widModel, objectModel=objModel)
                self._window = MainView(WINDOW_NAME, widgetModel=widModel, objectModel=objModel)
            else:
                self._window.show()

        # Deregister the function that shows the window from omni.ui
        #ui.Workspace.set_show_window_fn(MetaCloudExplorerAzure.WINDOW_NAME, None)
        self._menu = omni.kit.ui.get_editor_menu().add_item(self._menu_path, on_menu_click, True)

    def on_shutdown(self):
        carb.log_info("[meta.cloud.explorer.azure.extension] MetaCloudExplorer shutdown")
        omni.kit.ui.get_editor_menu().remove_item(self._menu)
        if hasattr(self, "_window"):
            if self._window:
                self._window.destroy()
                self._window = None
        if hasattr(self, "_viewport_scene"):
            if self._viewport_scene:
                self._viewport_scene.destroy()
                self._viewport_scene = None
3,749
Python
43.117647
168
0.672713
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/azure_data_manager.py
# import omni.kit.pipapi # import carb # import os # import json # import sys # from datetime import datetime # import omni.kit.notification_manager as nm # omni.kit.pipapi.install("azure-identity", module="azure-identity", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True ) # omni.kit.pipapi.install("azure-mgmt-resource", module="azure-mgmt-resource", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True ) # sys.path.append("D:/python37/lib/site-packages") # #print(sys.modules.keys()) # from .data_store import DataStore # from .prim_utils import cleanup_prim_path # from azure.mgmt.resource import ResourceManagementClient # from azure.mgmt.resource.subscriptions import SubscriptionClient # from azure.identity import ClientSecretCredential # import asyncio # import os # # Manage resources and resource groups - create, update and delete a resource group, # # deploy a solution into a resource group, export an ARM template. Create, read, update # # and delete a resource # class AzureDataManager(): # def __init__(self): # self._dataStore = DataStore.instance() # Get A Singleton instance, store data here # def get_token(self): # # Acquire a credential object using CLI-based authentication. # if self._dataStore._azure_tenant_id =="": # self.sendNotify("MCE: Please enter Azure credentials to connect...", nm.NotificationStatus.WARNING) # return # if self._dataStore._azure_client_secret =="": # self.sendNotify("MCE: Please enter Azure client secret to connect...", nm.NotificationStatus.WARNING) # return False # self.sendNotify("MCE: Connecting to Azure Tenant...", nm.NotificationStatus.INFO) # self._token_credential = ClientSecretCredential( # self._dataStore._azure_tenant_id, # self._dataStore._azure_client_id, # self._dataStore._azure_client_secret) # # Retrieve subscription ID from environment variable. # self._subscription_id = self._dataStore._azure_subscription_id # return True # #validate we can connect # def connect(self): # #Get a token # valid = self.get_token() # try: # if (valid): # # List subscriptions # subscription_client = SubscriptionClient(credential=self._token_credential) # page_result = subscription_client.subscriptions.list() # result = [item for item in page_result] # for item in result: # carb.log_warn(item.subscription_id) # carb.log_warn(item.tags) # except: # valid = False # error = sys.exc_info()[0] # carb.log_error("Oops! 
" + str(error) + " occurred.") # self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING) # return valid # def clicked_ok(): # carb.log_info("User clicked ok") # def sendNotify(self, message:str, status:nm.NotificationStatus): # # https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager# # import omni.kit.notification_manager as nm # ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok) # nm.post_notification( # message, # hide_after_timeout=True, # duration=3, # status=status, # button_infos=[], # ) # #Connect to API and load adata # def load_data(self): # self.save_connection_data() # self.load_groups() # self.load_resources() # def save_connection_data(self): # self._dataStore.Save_Config_Data() # def load_resources(self): # try: # resCnt = 0 # for grp in self._dataStore._groups: # resources = self.list_group_resources(grp) # for res in resources: # resCnt = resCnt +1 # name = cleanup_prim_path(self, Name=res.name) # self._dataStore._resources[name] = {"name":name, "type": res.type, "group": grp, "location":res.location, "subscription":self._subscription_id, "lmcost": 0} # #self._dataStore.res["name"] = {"name":res["name"], "type": type, "group": group, "location":location, "subscription":subscription, "lmcost": lmcost} # self.sendNotify("MCE: Azure resources loaded: " + str(len(self._dataStore._resources)), nm.NotificationStatus.INFO) # carb.log_info("Azure API resources loaded: " + str(len(self._dataStore._resources))) # except: # error = sys.exc_info()[0] # carb.log_error("Oops! " + str(error) + " occurred.") # self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING) # def load_groups(self): # try: # resource_client = ResourceManagementClient(self._token_credential, self._subscription_id) # rg_groups = resource_client.resource_groups.list() # grpCnt = 0 # for group in rg_groups: # grp = {group.name:{"name":group.name, "subs": self._subscription_id, "location":group.location}} # self._dataStore._groups.update(grp) # grpCnt = grpCnt + 1 # self.sendNotify("MCE: Azure groups loaded: " + str(len(self._dataStore._groups)), nm.NotificationStatus.INFO) # carb.log_info("Azure API groups loaded: " + str(len(self._dataStore._groups))) # except: # error = sys.exc_info()[0] # carb.log_error("Oops! " + str(error) + " occurred.") # self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING) # #return a list of resource groups # def get_resource_groups(self): # # Obtain the management object for resources. # try: # resource_client = ResourceManagementClient(self._token_credential, self._subscription_id) # rg_groups = resource_client.resource_groups.list() # return rg_groups # except: # error = sys.exc_info()[0] # carb.log_error("Oops! " + str(error) + " occurred.") # self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING) # #for item in rg_groups: # # print(item) # # List Resources within the group # def list_group_resources(self, groupName:str): # # Obtain the management object for resources. # resource_client = ResourceManagementClient(self._token_credential, self._subscription_id) # carb.log_info("List all of the resources within the group") # res = resource_client.resources.list_by_resource_group(groupName) # return res # #creates a resource group with groupName at location # def create_resource_group(self, groupName:str, location:str): # # Obtain the management object for resources. 
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id) # # # # Managing resource groups # # # resource_group_params = {"location": location} # # Create Resource group # print("Create Resource Group: " + groupName + " @ " + location) # self.print_item( # resource_client.resource_groups.create_or_update( # groupName, resource_group_params) # ) # def print_item(self, group): # """Print a ResourceGroup instance.""" # print("\tName: {}".format(group.name)) # print("\tId: {}".format(group.id)) # print("\tLocation: {}".format(group.location)) # print("\tTags: {}".format(group.tags)) # self.print_properties(group.properties) # def print_properties(self, props): # """Print a ResourceGroup properties instance.""" # if props and props.provisioning_state: # print("\tProperties:") # print("\t\tProvisioning State: {}".format(props.provisioning_state)) # print("\n\n") # # Create a Key Vault in the Resource Group # def create_key_vault(self, vaultName:str, location:str, groupName:str): # # Obtain the management object for resources. # resource_client = ResourceManagementClient(self._token_credential, self._subscription_id) # print("Create a Key Vault via a Generic Resource Put") # key_vault_params = { # "location": location, # "properties": { # "sku": {"family": "A", "name": "standard"}, # "tenantId": self._dataStore._azure_tenant_id, # "accessPolicies": [], # "enabledForDeployment": True, # "enabledForTemplateDeployment": True, # "enabledForDiskEncryption": True # }, # } # resource_client.resources.begin_create_or_update( # resource_group_name=groupName, # resource_provider_namespace="Microsoft.KeyVault", # parent_resource_path="", # resource_type="vaults", # # Suffix random string to make vault name unique # resource_name=vaultName + datetime.utcnow().strftime("-%H%M%S"), # api_version="2019-09-01", # parameters=key_vault_params # ).result() # # Export the Resource group template # def export_group_template(self, groupName:str): # # Obtain the management object for resources. 
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id) # print("Export Resource Group Template") # BODY = { # 'resources': ['*'] # } # result = json.dumps( # resource_client.resource_groups.begin_export_template( # groupName, BODY).result().template, indent=4 # ) # print(result + "\n\n") # return result # # def run_example(): # # """Resource Group management example.""" # # # # # # Create the Resource Manager Client with an Application (service principal) token provider # # # # # subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", None) # your Azure Subscription Id # # credentials = DefaultAzureCredential() # # client = ResourceManagementClient(credentials, subscription_id) # # # # # # Managing resource groups # # # # # resource_group_params = {"location": "westus"} # # # List Resource Groups # # print("List Resource Groups") # # for item in client.resource_groups.list(): # # print_item(item) # # # Create Resource group # # print("Create Resource Group") # # print_item( # # client.resource_groups.create_or_update( # # GROUP_NAME, resource_group_params) # # ) # # # Modify the Resource group # # print("Modify Resource Group") # # resource_group_params.update(tags={"hello": "world"}) # # print_item( # # client.resource_groups.update( # # GROUP_NAME, resource_group_params) # # ) # # # Create a Key Vault in the Resource Group # # print("Create a Key Vault via a Generic Resource Put") # # key_vault_params = { # # "location": "westus", # # "properties": { # # "sku": {"family": "A", "name": "standard"}, # # "tenantId": os.environ["AZURE_TENANT_ID"], # # "accessPolicies": [], # # "enabledForDeployment": True, # # "enabledForTemplateDeployment": True, # # "enabledForDiskEncryption": True # # }, # # } # # client.resources.begin_create_or_update( # # resource_group_name=GROUP_NAME, # # resource_provider_namespace="Microsoft.KeyVault", # # parent_resource_path="", # # resource_type="vaults", # # # Suffix random string to make vault name unique # # resource_name="azureSampleVault" + datetime.utcnow().strftime("-%H%M%S"), # # api_version="2019-09-01", # # parameters=key_vault_params # # ).result() # # # List Resources within the group # # print("List all of the resources within the group") # # for item in client.resources.list_by_resource_group(GROUP_NAME): # # print_item(item) # # # Export the Resource group template # # print("Export Resource Group Template") # # BODY = { # # 'resources': ['*'] # # } # # print( # # json.dumps( # # client.resource_groups.begin_export_template( # # GROUP_NAME, BODY).result().template, indent=4 # # ) # # ) # # print("\n\n") # # # Delete Resource group and everything in it # # print("Delete Resource Group") # # delete_async_operation = client.resource_groups.begin_delete(GROUP_NAME) # # delete_async_operation.wait() # # print("\nDeleted: {}".format(GROUP_NAME))
13,676
Python
38.758721
178
0.568368
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/rectangle.py
from omni import ui

from .constant import COLORS


# A rectangle that will block mouse events to deeper widgets
class OpaqueRectangle(ui.Rectangle):
    def __init__(self, **kwargs):
        kwargs["opaque_for_mouse_events"] = True
        kwargs["mouse_pressed_fn"] = lambda *_: self._dummy()
        super().__init__(**kwargs)

    def _dummy(self):
        pass


class ShortSeparator:
    def __init__(self, height):
        self._panel = ui.HStack(width=2, height=height)
        with self._panel:
            with ui.VStack(width=2, height=height, style={"Line": {"color": COLORS.LIGHRT_GARY, "border_width": 1}}):
                ui.Spacer(height=2)
                ui.Line(width=1, alignment=ui.Alignment.LEFT)
                ui.Line(width=1, alignment=ui.Alignment.LEFT)
                ui.Spacer(height=2)


class DashRectangle:
    def __init__(self, width, height, padding_x=2, padding_y=2, w_step=10, h_step=10):
        w_num = int((width - 2 * padding_x - 2) / w_step)
        h_num = int((height - 2 * padding_y) / h_step)

        with ui.ZStack():
            with ui.VStack(style={"Line": {"color": COLORS.LIGHRT_GARY, "border_width": 1}}):
                ui.Spacer(height=padding_y)
                self._build_horizontal_line(w_num, padding_x)
                ui.Spacer(height=height - 2 * padding_y - 2)
                self._build_horizontal_line(w_num, padding_x)
                ui.Spacer(height=padding_y)
            with ui.HStack(height=height):
                ui.Spacer(width=padding_x)
                self._build_vertical_line(height, h_step, h_num, padding_y)
                ui.Spacer()
                self._build_vertical_line(height, h_step, h_num, padding_y)
                ui.Spacer(width=padding_x - 2)

    def _build_horizontal_line(self, w_num, padding_x):
        with ui.HStack():
            ui.Spacer(width=padding_x)
            for _ in range(w_num):
                ui.Line(width=6, height=1)
                ui.Spacer()
            ui.Line(height=1)
            ui.Spacer(width=padding_x)

    def _build_vertical_line(self, height, h_step, h_num, padding_y):
        with ui.VStack(width=2, height=height):
            ui.Spacer(height=padding_y)
            for _ in range(h_num):
                ShortSeparator(h_step)
2,269
Python
36.833333
117
0.555751
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/packer.py
""" MIT License Copyright (c) 2016 Michael Shihrer (michael@shihrer.me) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ """ EXAMPLE USAGE: https://repl.it/NfZq/1 blocks = [] blocks.append(Block((21,10))) blocks.append(Block((5,10))) blocks.append(Block((5,10))) blocks.append(Block((7,13))) blocks.append(Block((2,4))) pack = Packer() pack.fit(blocks) for block in blocks: if block.fit: print("size: {} loc: {}".format(block.size, block.fit.location)) else: print("not fit: {}".format(block.size)) """ """ For a more fleshed out example, see: https://github.com/shihrer/BinPacker/tree/Develop This has a number of optimizations like removing recursion so it can run on much, much large inputs without hitting any stack limitations. Basically an order of magnitude faster on very large inputs. Also includes a simple visualizer for the results using pygame. """ class Packer: """ Defines a packer object to be used on a list of blocks. """ def __init__(self): self.root = None def fit(self, blocks): """ Initiates the packing. blocks: A list of block objects with a 'size' proprety representing (w,h) as a tuple. 
""" self.root = Node((0, 0), blocks[0].size) for block in blocks: some_node = self.find_node(self.root, block.size) if some_node is not None: block.fit = self.split_node(some_node, block.size) else: block.fit = self.grow_node(block.size) return None def find_node(self, some_node, size): if some_node.used: return self.find_node(some_node.right, size) or self.find_node(some_node.down, size) elif (size[0] <= some_node.size[0]) and (size[1] <= some_node.size[1]): return some_node else: return None def split_node(self, some_node, size): some_node.used = True some_node.down = Node((some_node.location[0], some_node.location[1] + size[1]), (some_node.size[0], some_node.size[1] - size[1])) some_node.right = Node((some_node.location[0] + size[0], some_node.location[1]), (some_node.size[0] - size[0], size[1])) return some_node def grow_node(self, size): can_go_down = size[0] <= self.root.size[0] can_go_right = size[1] <= self.root.size[1] should_go_down = can_go_down and (self.root.size[0] >= (self.root.size[1] + size[1])) should_go_right = can_go_right and (self.root.size[1] >= (self.root.size[0] + size[0])) if should_go_right: return self.grow_right(size) elif should_go_down: return self.grow_down(size) elif can_go_right: return self.grow_right(size) elif can_go_down: return self.grow_down(size) else: return None def grow_right(self, size): new_root = Node((0, 0), (self.root.size[0] + size[0], self.root.size[1])) new_root.used = True new_root.down = self.root new_root.right = Node((self.root.size[0], 0), (size[0], self.root.size[1])) self.root = new_root some_node = self.find_node(self.root, size) if some_node is not None: return self.split_node(some_node, size) else: return None def grow_down(self, size): new_root = Node((0, 0), (self.root.size[0], self.root.size[1] + size[1])) new_root.used = True new_root.down = Node((0, self.root.size[1]), (self.root.size[0], size[1])) new_root.right = self.root self.root = new_root some_node = self.find_node(self.root, size) if some_node is not None: return self.split_node(some_node, size) else: return None class Block: """ Defines an object Block with two properties. size: tuple representing the blocks size (w,h) fit: Stores a Node object for output. """ def __init__(self, size): self.size = size self.fit = None class Node: """ Defines an object Node for use in the packer function. Represents the space that a block is placed. used: Boolean to determine if a node has been used. down: A node located beneath the current node. right: A node located to the right of the current node. size: A tuple (w,h) representing the size of the node. location: A tuple representing the (x,y) coordinate of the top left of the node. """ def __init__(self, location, size): self.used = False self.down = None self.right = None self.size = size self.location = location
5,737
Python
34.419753
104
0.625937
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/style_meta.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["meta_window_style"]

from omni.ui import color as cl
from omni.ui import constant as fl
from omni.ui import url
import omni.kit.app
import omni.ui as ui
import pathlib

# Pre-defined constants. It's possible to change them at runtime.
cl.meta_window_hovered = cl("#2b2e2e")
cl.meta_window_text = cl("#9e9e9e")
fl.meta_window_attr_hspacing = 10
fl.meta_window_attr_spacing = 1
fl.meta_window_group_spacing = 2

# Pre-defined constants. It's possible to change them at runtime.
fl_attr_hspacing = 10
fl_attr_spacing = 1
fl_group_spacing = 5

cl_attribute_dark = cl("#202324")
cl_attribute_red = cl("#ac6060")
cl_attribute_green = cl("#60ab7c")
cl_attribute_blue = cl("#35889e")
cl_line = cl("#404040")
cl_text_blue = cl("#5eb3ff")
cl_text_gray = cl("#707070")
cl_text = cl("#a1a1a1")
cl_text_hovered = cl("#ffffff")
cl_field_text = cl("#5f5f5f")
cl_widget_background = cl("#1f2123")
cl_attribute_default = cl("#505050")
cl_attribute_changed = cl("#55a5e2")
cl_slider = cl("#383b3e")
cl_combobox_background = cl("#252525")
cl_main_background = cl("#2a2b2c")

cls_temperature_gradient = [cl("#fe0a00"), cl("#f4f467"), cl("#a8b9ea"), cl("#2c4fac"), cl("#274483"), cl("#1f334e")]
cls_color_gradient = [cl("#fa0405"), cl("#95668C"), cl("#4b53B4"), cl("#33C287"), cl("#9fE521"), cl("#ff0200")]
cls_tint_gradient = [cl("#1D1D92"), cl("#7E7EC9"), cl("#FFFFFF")]
cls_grey_gradient = [cl("#020202"), cl("#525252"), cl("#FFFFFF")]
cls_button_gradient = [cl("#232323"), cl("#656565")]


# The main style dict
meta_window_style = {
    "Label::attribute_name": {
        "color": cl.meta_window_text,
        "margin_height": fl.meta_window_attr_spacing,
        "margin_width": fl.meta_window_attr_hspacing,
    },
    "CollapsableFrame::group": {"margin_height": fl.meta_window_group_spacing},
    "CollapsableFrame::group:hovered": {"secondary_color": cl.meta_window_hovered},
    # for Gradient Image
    "ImageWithProvider::gradient_slider": {"border_radius": 4, "corner_flag": ui.CornerFlag.ALL},
    "ImageWithProvider::button_background_gradient": {"border_radius": 3, "corner_flag": ui.CornerFlag.ALL},
}


# Functions from NVIDIA
def hex_to_color(hex: int) -> tuple:
    # unpack RGBA channel values from a packed int
    red = hex & 255
    green = (hex >> 8) & 255
    blue = (hex >> 16) & 255
    alpha = (hex >> 24) & 255
    rgba_values = [red, green, blue, alpha]
    return rgba_values


def _interpolate_color(hex_min: int, hex_max: int, intep):
    max_color = hex_to_color(hex_max)
    min_color = hex_to_color(hex_min)
    color = [int((max - min) * intep) + min for max, min in zip(max_color, min_color)]
    return (color[3] << 8 * 3) + (color[2] << 8 * 2) + (color[1] << 8 * 1) + color[0]


def get_gradient_color(value, max, colors):
    step_size = len(colors) - 1
    step = 1.0 / float(step_size)
    percentage = value / float(max)

    idx = int(percentage / step)
    if idx == step_size:
        color = colors[-1]
    else:
        color = _interpolate_color(colors[idx], colors[idx + 1], percentage)
    return color


def generate_byte_data(colors):
    data = []
    for color in colors:
        data += hex_to_color(color)

    _byte_provider = ui.ByteImageProvider()
    _byte_provider.set_bytes_data(data, [len(colors), 1])
    return _byte_provider


def build_gradient_image(colors, height, style_name):
    byte_provider = generate_byte_data(colors)
    ui.ImageWithProvider(byte_provider, fill_policy=omni.ui.IwpFillPolicy.IWP_STRETCH, height=height, name=style_name)
    return byte_provider
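# Illustrative usage sketch (not part of the original module): get_gradient_color
# maps a value in [0, max] onto one of the packed-int gradients above. The cost
# numbers used here are hypothetical.
def _gradient_example():
    max_cost = 500.0
    low = get_gradient_color(50.0, max_cost, cls_temperature_gradient)
    high = get_gradient_color(480.0, max_cost, cls_temperature_gradient)
    return low, high  # packed 0xAABBGGRR ints usable as omni.ui colors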
3,926
Python
34.378378
117
0.670148
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/group_base.py
from abc import ABC, abstractmethod import omni.client import omni.kit.app import omni.ui as ui import omni.usd import omni.kit.commands from pathlib import Path import shutil import os import asyncio import locale import carb from .prim_utils import create_plane from .prim_utils import get_font_size_from_length from .prim_utils import draw_image from .prim_utils import cleanup_prim_path, create_and_place_prim, get_parent_child_prim_path from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf from .data_manager import DataManager from .data_store import DataStore from .math_utils import calculateGroupTransforms from .scatter_complex import distributePlanes from .omni_utils import create_prims, create_shaders from os.path import exists CURRENT_PATH = Path(__file__).parent DATA_PATH = CURRENT_PATH.joinpath("temp") #Defines an Abstract class of an Aggregate set of resource views #Children access specific data sets for the base class GroupBase(ABC): def __init__(self): self._dataManager = DataManager.instance() # Get A Singleton instance self._dataStore = DataStore.instance() # Get A Singleton instance #root prim paths self.root_path = Sdf.Path('/World') # limit the number of rows read self.max_elements = 5000 self.base_prim_size = 50 #limits self.x_threshold = 50000 self.y_threshold = 50000 self.z_threshold = 50000 self.x_extent = 0 self.y_extent = 0 self.z_extent = 0 #Create the stage... def initializeStage(self, stage_unit_per_meter:float): self._stage = omni.usd.get_context().get_stage() root_prim = self._stage.GetPrimAtPath(self.root_path) # set the up axis UsdGeom.SetStageUpAxis(self._stage, UsdGeom.Tokens.z) # set the unit of the world UsdGeom.SetStageMetersPerUnit(self._stage, stage_unit_per_meter) self._stage.SetDefaultPrim(root_prim) #Depending on the Active View, "groups" will contain different aggreagetes. 
#This function creates the GroundPlane objects on the stage for each group async def CreateGroups(self, transforms): #b = sorted(groups) #carb.log_info("Sorted keys",b) if (len(self._dataStore._lcl_groups)) >0 : #Create new prims and then transform them path = str(Sdf.Path(self.root_path).AppendPath(self._view_path)) create_prims( transforms=transforms, prim_names=self._dataStore._lcl_groups, parent_path=path, up_axis="Z", plane_size=self._dataStore._lcl_sizes ) #DEBUG i=0 for grp in self._dataStore._lcl_groups: prim_path = Sdf.Path(self.root_path).AppendPath(str(self._view_path)) prim_path = Sdf.Path(prim_path).AppendPath(grp["group"]) #Selects prim, creates associated OMNIPBR shaders carb.log_info("Create shader " + grp["group"] + " of " + str(len(self._dataStore._lcl_groups))) await create_shaders(base_path=prim_path, prim_name=grp["group"]) await omni.kit.app.get_app().next_update_async() #Set the shader images for the groups await self.AddShaderImages() await omni.kit.app.get_app().next_update_async() #Assign Images to the group Shaders async def AddShaderImages(self): #Images have been pre-made, jsut assign them for g in self._dataStore._lcl_groups: clean = cleanup_prim_path(self, g["group"]) #Dont show cost output_file = DATA_PATH.joinpath(clean + ".png") file_exists = exists(output_file) if not file_exists: draw_image(self, output_file=output_file, src_file=self._dataStore._bgl_file_path , textToDraw=g, costToDraw="") #Get Stage stage = omni.usd.get_context().get_stage() #Find the /Looks root curr_prim = stage.GetPrimAtPath("/") looks_path = "" for prim in Usd.PrimRange(curr_prim): if prim.GetPath() == "/Looks": looks_path = "/Looks" break elif prim.GetPath() == "/World/Looks": looks_path = "/World/Looks" break #carb.log_info("Looks root is: " +looks_path) #Get the Shader and set the image property if (looks_path == ""): looks_path = "/Looks" shader_path = Sdf.Path(looks_path) shader_path = Sdf.Path(shader_path.AppendPath(clean)) shader_path = Sdf.Path(shader_path.AppendPath("Shader")) #select the shader selection = omni.usd.get_context().get_selection() selection.set_selected_prim_paths([str(shader_path)], False) #Get the Shader shader_prim = stage.GetPrimAtPath(str(shader_path)) carb.log_info("Shader Attributes:-----" + str(shader_path)) #carb.log_info(shader_prim.GetAttributes()) carb.log_info("Set shader image " + str(output_file)) try: shader_prim.CreateAttribute("inputs:diffuse_texture", Sdf.ValueTypeNames.Asset) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(shader_path).AppendPath('.inputs:diffuse_texture'), value=str(output_file), prev=str(output_file)) await omni.kit.app.get_app().next_update_async() except: #Do it again! 
omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(shader_path).AppendPath('.inputs:diffuse_texture'), value=str(output_file),prev=str(output_file)) #Change the Group Shaders textures to /from cost images async def showHideCosts(self): #Get Stage stage = omni.usd.get_context().get_stage() #Find the /Looks root curr_prim = stage.GetPrimAtPath("/") looks_path = "" for prim in Usd.PrimRange(curr_prim): if prim.GetPath() == "/Looks": looks_path = "/Looks" break elif prim.GetPath() == "/World/Looks": looks_path = "/World/Looks" break #carb.log_info("Looks root is: " +looks_path) #Get the Shader and set the image property if (looks_path == ""): looks_path = "/Looks" #Flip the shader images on all group shader prims for g in self._dataStore._lcl_groups: clean = cleanup_prim_path(self, g["group"]) cost_file = DATA_PATH.joinpath(clean + "-cost.png") file_exists = exists(cost_file) if not file_exists: draw_image(self, output_file=cost_file, src_file=self._dataStore._bg_file_path , textToDraw=g, costToDraw=self._cost) output_file = DATA_PATH.joinpath(clean + ".png") file_exists = exists(output_file) if not file_exists: draw_image(self, output_file=output_file, src_file=self._dataStore._bg_file_path , textToDraw=g, costToDraw="") #Get the Shaders shader_path = Sdf.Path(looks_path) shader_path = Sdf.Path(shader_path.AppendPath(clean)) shader_path = Sdf.Path(shader_path.AppendPath("Shader")) #select the shader selection = omni.usd.get_context().get_selection() selection.set_selected_prim_paths([str(shader_path)], False) #Get the Shader shader_prim = stage.GetPrimAtPath(str(shader_path)) # carb.log_info("Shader Attributes:-----" + str(shader_path)) # carb.log_info(shader_prim.GetAttributes()) try: currentVal = shader_prim.GetAttribute("inputs:diffuse_texture").Get() if "-cost.png" not in str(currentVal): omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(shader_path).AppendPath('.inputs:diffuse_texture'), value=str(cost_file), prev=str(output_file)) else: omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(shader_path).AppendPath('.inputs:diffuse_texture'), value=str(output_file), prev=str(cost_file)) except: pass #Load the resources from map async def loadGroupResources(self,group_name, group_prim_path, values): i=0 # prim count tracker resCount = len(values) #Get the transform coordinates for a plane of this size with nn resources transforms = calculateGroupTransforms(self=self, scale=self._scale, count=resCount) for res in values: carb.log_info("Placing prim " + res["type"] + " " + str(i) + " of " + str(resCount)) resName = res["name"] resShape = res["shape"] resType = res["type"] resGrp = res["group"] resLoc = res["location"] resSub = res["subscription"] cost = res["cost"] prim_vector = transforms[i] carb.log_info("Creating prim path:" + str(group_prim_path) + " " + str(resName)) new_prim_path = get_parent_child_prim_path(self, group_prim_path, resName) carb.log_info("New prim path:" + str(new_prim_path)) await create_and_place_prim(self, prim_type= resType, prim_name=resName, grp_name=resGrp, sub_name=resSub, loc_name=resLoc, cost=cost, new_prim_path=str(new_prim_path), shapeToRender=resShape, scale=(self._scale*self.base_prim_size), position=prim_vector ) omni.kit.commands.execute('ChangeMetadata', object_paths=[str(new_prim_path)], key='kind', value='component') i=i+1 #increment resource id # Create Group Planes for the aggregates @abstractmethod def calcGroupPlaneSizes(self): pass # Requires subclass implm # Calc Costs for the aggregates @abstractmethod def 
calulateCosts(self): pass # Requires subclass implm # Load the resources for this view's groups @abstractmethod def loadResources(self): pass # Requires subclass implm #Selcet the active group's prims @abstractmethod def selectGroupPrims(self): pass # Requires subclass implm
11,167
Python
35.736842
142
0.563535
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/Singleton.py
class Singleton:
    """
    A non-thread-safe helper class to ease implementing singletons.
    This should be used as a decorator -- not a metaclass -- to the
    class that should be a singleton.

    The decorated class can define one `__init__` function that
    takes only the `self` argument. Also, the decorated class cannot be
    inherited from. Other than that, there are no restrictions that apply
    to the decorated class.

    To get the singleton instance, use the `instance` method. Trying
    to use `__call__` will result in a `TypeError` being raised.
    """

    def __init__(self, decorated):
        self._decorated = decorated

    def instance(self):
        """
        Returns the singleton instance. Upon its first call, it creates a
        new instance of the decorated class and calls its `__init__` method.
        On all subsequent calls, the already created instance is returned.
        """
        try:
            return self._instance
        except AttributeError:
            self._instance = self._decorated()
            return self._instance

    def __call__(self):
        raise TypeError('Singletons must be accessed through `instance()`.')

    def __instancecheck__(self, inst):
        return isinstance(inst, self._decorated)
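# Illustrative usage sketch (not part of the original module): this mirrors how
# DataStore and DataManager in this extension apply the decorator. `Config` is a
# hypothetical example class.
if __name__ == "__main__":
    @Singleton
    class Config:
        def __init__(self):
            self.value = 42

    cfg = Config.instance()    # first call constructs the decorated class
    same = Config.instance()   # subsequent calls return the same object
    assert cfg is same and cfg.value == 42
    try:
        Config()               # direct calls are rejected by design
    except TypeError:
        pass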
1,279
Python
33.594594
76
0.647381
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/unit_tests.py
import unittest
from .math_utils import calcPlaneSizeForGroup


class TestRGPlaneSizeCalc(unittest.TestCase):
    # Each test method needs a unique name so unittest collects all of them.
    def test_count_1(self):
        planeSize = calcPlaneSizeForGroup(1)
        self.assertEqual(planeSize, 1)

    def test_count_2(self):
        planeSize = calcPlaneSizeForGroup(2)
        self.assertEqual(planeSize, 2)

    def test_count_3(self):
        planeSize = calcPlaneSizeForGroup(3)
        self.assertEqual(planeSize, 2)

    def test_count_4(self):
        planeSize = calcPlaneSizeForGroup(4)
        self.assertEqual(planeSize, 2)

    def test_count_5(self):
        planeSize = calcPlaneSizeForGroup(5)
        self.assertEqual(planeSize, 3)

    def test_count_6(self):
        planeSize = calcPlaneSizeForGroup(6)
        self.assertEqual(planeSize, 3)


if __name__ == '__main__':
    unittest.main()
823
Python
24.749999
45
0.665857
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/azure_resource_map.py
shape_usda_name = { "AAD":"omniverse://localhost/MCE/3dIcons/AzureAAD_1.1.usd", "Resource_Group":"omniverse://localhost/MCE/3dIcons/Resource_Groups_3.0.usd", "Storage_account":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2_8.usd", "App_Service":"omniverse://localhost/MCE/3dIcons/AppServices_1.2.usd", "Subscription":"omniverse://localhost/MCE/3dIcons/Subscriptions_1.3.usd", "API_Connection":"omniverse://localhost/MCE/3dIcons/API_Connection.usd", "API_Management_service":"omniverse://localhost/MCE/3dIcons/API_management_services_fix.usd", "App_Configuration":"omniverse://localhost/MCE/3dIcons/App-Configuration.usd", "App_Service_plan":"omniverse://localhost/MCE/3dIcons/app_service_plan_fix.usd", "App_Service":"omniverse://localhost/MCE/3dIcons/AppServices_1.2.usd", "Application_Insights":"omniverse://localhost/MCE/3dIcons/Application_Insights_4.0.usd", "Application_gateway":"omniverse://localhost/MCE/3dIcons/Application_Gateway.usd", "Automation_Account":"omniverse://localhost/MCE/3dIcons/automation_accounts_fix.usd", "Availability_test":"omniverse://localhost/MCE/3dIcons/Availability_Test.usd", "Azure_Bot":"omniverse://localhost/MCE/3dIcons/Web_App_Bot.usd", "Azure_Cosmos_DB_API_for_MongoDB_account":"omniverse://localhost/MCE/3dIcons/Azure_Cosmos_DB_API_MongoDB.usd", "Azure_Cosmos_DB_account":"omniverse://localhost/MCE/3dIcons/Azure_Cosmos_DB.usd", "Azure_Data_Explorer_Cluster":"omniverse://localhost/MCE/3dIcons/azure_data_explorer_clusters_fix.usd", "Azure_DevOps_organization":"omniverse://localhost/MCE/3dIcons/Azure_Dev_Ops.usd", "Azure_Machine_Learning":"omniverse://localhost/MCE/3dIcons/Azure_Machine_Learning.usd", "Azure_Workbook":"omniverse://localhost/MCE/3dIcons/azure_workbook_fix.usd", "Bastion":"omniverse://localhost/MCE/3dIcons/Bastion.usd", "Cognitive_Service":"omniverse://localhost/MCE/3dIcons/Cognitive_Services.usd", "Container_registry":"omniverse://localhost/MCE/3dIcons/container_registries.usd", "Data_Lake_Analytics":"omniverse://localhost/MCE/3dIcons/Data_Lake_Analytics_1.2.usd", "Data_Lake_Storage_Gen1":"omniverse://localhost/MCE/3dIcons/data_lake_storage_gen1_fix.usd", "Data_factory__V2_":"omniverse://localhost/MCE/3dIcons/data_factory_fix.usd", "Disk":"omniverse://localhost/MCE/3dIcons/Disk_1.0.usd", "DNS_zone":"omniverse://localhost/MCE/3dIcons/DNS_Zone.usd", "DNS_Zone":"omniverse://localhost/MCE/3dIcons/DNS_Zone.usd", "Event_Grid_System_Topic":"omniverse://localhost/MCE/3dIcons/event_grid_topics_fix.usd", "Event_Hubs_Namespace":"omniverse://localhost/MCE/3dIcons/events_hub_fix.usd", "Firewall_Policy":"omniverse://localhost/MCE/3dIcons/Firewall_Policy.usd", "Firewall":"omniverse://localhost/MCE/3dIcons/Firewall.usd", "Function_App":"omniverse://localhost/MCE/3dIcons/function_apps_fix.usd", "Image":"omniverse://localhost/MCE/3dIcons/image_fix.usd", "Key_vault":"omniverse://localhost/MCE/3dIcons/Key_Vaults_2.0.usd", "Kubernetes_service":"omniverse://localhost/MCE/3dIcons/kubernetess_services_fix.usd", "Language":"omniverse://localhost/MCE/3dIcons/Language_Understanding.usd", "Language_understanding":"omniverse://localhost/MCE/3dIcons/Language_Understanding.usd", "Load_balancer":"omniverse://localhost/MCE/3dIcons/load_balancer_fix.usd", "Log_Analytics_query_pack":"omniverse://localhost/MCE/3dIcons/Log_Analytics_Query_Pack.usd", "Log_Analytics_workspace":"omniverse://localhost/MCE/3dIcons/Log_Analytics_Workspace.usd", "Logic_App__Standard_":"omniverse://localhost/MCE/3dIcons/Logic_Apps_Std.usd", "Logic_app":"omniverse://localhost/MCE/3dIcons/Logic_apps_fix.usd", 
"Logic_apps_custom_connector":"omniverse://localhost/MCE/3dIcons/Logic_Apps_Custom_Connector.usd", "Managed_Identity":"omniverse://localhost/MCE/3dIcons/Managed_Identity.usd", "Managed_application":"omniverse://localhost/MCE/3dIcons/Managed_Identity.usd", "Network_Interface":"omniverse://localhost/MCE/3dIcons/network_interface_fix.usd", "Microsoft_Network_networkInterfaces":"omniverse://localhost/MCE/3dIcons/network_interface_fix.usd", "Network_Watcher":"omniverse://localhost/MCE/3dIcons/network_watcher_fix.usd", "Network_security_group":"omniverse://localhost/MCE/3dIcons/network_security_group_fix.usd", "Microsoft_Network_networkSecurityGroups" : "omniverse://localhost/MCE/3dIcons/network_security_group_fix.usd", "Power_BI_Embedded":"omniverse://localhost/MCE/3dIcons/Power_BI_Embedded_2.0.usd", "Private_DNS_zone":"omniverse://localhost/MCE/3dIcons/Private_DNS_Zone_2.0.usd", "Private_endpoint":"omniverse://localhost/MCE/3dIcons/Private_End_Point.usd", "Public_IP_address":"omniverse://localhost/MCE/3dIcons/public_ip_adresses_fix.usd", "Recovery_Services_vault":"omniverse://localhost/MCE/3dIcons/Recovery_Services_Vault.usd", "Restore_Point_Collection":"omniverse://localhost/MCE/3dIcons/Restore_Point_Collection_2.0.usd", "Runbook":"omniverse://localhost/MCE/3dIcons/Runbook.usd", "SQL_database":"omniverse://localhost/MCE/3dIcons/SQLDatabase.usd", "SQL_elastic_pool":"omniverse://localhost/MCE/3dIcons/SQL_Elastic_Pools.usd", "SQL_server":"omniverse://localhost/MCE/3dIcons/SQLServer.usd", "SQL_virtual_machine":"omniverse://localhost/MCE/3dIcons/SQL_Virtual_Machine_1.1.usd", "Search_service":"omniverse://localhost/MCE/3dIcons/Search_Services_1.0.usd", "Service_Bus_Namespace":"omniverse://localhost/MCE/3dIcons/service_bus_fix.usd", "Service_Fabric_cluster":"omniverse://localhost/MCE/3dIcons/service-fabric-clusters_fix.usd", "Shared_dashboard":"omniverse://localhost/MCE/3dIcons/Shared_Dashboard.usd", "Snapshot":"omniverse://localhost/MCE/3dIcons/Snapshot.usd", "Solution":"omniverse://localhost/MCE/3dIcons/solution.usd", "Storage_account":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd", "Traffic_Manager_profile":"omniverse://localhost/MCE/3dIcons/Traffic_Manager_Profiles_1.0.usd", "Virtual_machine_scale_set":"omniverse://localhost/MCE/3dIcons/Virtual_Machines_Scale_Sets_2.0.usd", "Virtual_machine":"omniverse://localhost/MCE/3dIcons/Virtual_Machine_1.1.usd", "Virtual_network":"omniverse://localhost/MCE/3dIcons/Virtual_Network.usd", "Web_App_Bot":"omniverse://localhost/MCE/3dIcons/Web_App_Bot.usd", "Coat_Rack":"omniverse://localhost/MCE/3dIcons/Coat_Rack_Bowler_Hat.usdz", "Observation_Chair":"omniverse://localhost/MCE/3dIcons/Green-Ball-Chair.usd", "Leather_Jacket":"omniverse://localhost/MCE/3dIcons/Leather_Jacket.usdz", "Rug_V4": "omniverse://localhost/MCE/3dIcons/RugV4.usd", "Neon_All_Resources": "omniverse://localhost/MCE/NeonSigns/all_resources/all_resources.usd", "Neon_By_Group": "omniverse://localhost/MCE/NeonSigns/resources_by_group/resources_by_group.usd", "Neon_By_Subscription": "omniverse://localhost/MCE/NeonSigns/resources_by_cost/resources_by_cost.usd", "Neon_By_Location": "omniverse://localhost/MCE/NeonSigns/resources_by_location/resources_by_location.usd", "Neon_Azure_Cloud": "omniverse://localhost/MCE/NeonSigns/azure cloud/azure cloud.usd", "Microsoft_Web_certificates" :"omniverse://localhost/MCE/3dIcons/App_Service_Certificates.usd", "Microsoft_ClassicStorage_storageAccounts":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd", 
"Storage_account__classic_":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd", "microsoft_alertsmanagement_smartDetectorAlertRules": "omniverse://localhost/MCE/3dIcons/Alerts.usd", "Microsoft_KeyVault_vaults":"omniverse://localhost/MCE/3dIcons/Key_Vaults_2.0.usd", "Microsoft_Storage_storageAccounts" : "omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd", "Microsoft_Network_dnszones" : "omniverse://localhost/MCE/3dIcons/DNS_Zone.usd", "Microsoft_Web_sites": "omniverse://localhost/MCE/3dIcons/AppServices_1.2.usd", "Microsoft_Web_serverFarms" : "omniverse://localhost/MCE/3dIcons/app_service_plan_fix.usd", "Microsoft_Network_networkWatchers" : "omniverse://localhost/MCE/3dIcons/network_watcher_fix.usd", "Microsoft_OperationalInsights_workspaces" : "omniverse://localhost/MCE/3dIcons/Log_Analytics_Workspace.usd", "Microsoft_OperationsManagement_solutions" : "omniverse://localhost/MCE/3dIcons/solution.usd", "microsoft_insights_autoscalesettings" : "omniverse://localhost/MCE/3dIcons/scene.usd", "microsoft_visualstudio_account" :"omniverse://localhost/MCE/3dIcons/azure_devops_fix.usd", "Microsoft_Migrate_moveCollections" : "omniverse://localhost/MCE/3dIcons/scene.usd", "microsoft_insights_actiongroups" : "omniverse://localhost/MCE/3dIcons/scene.usd", "Microsoft_Insights_components":"omniverse://localhost/MCE/3dIcons/Application_Insights_4.0.usd", "Microsoft_Portal_dashboards" : "omniverse://localhost/MCE/3dIcons/Shared_Dashboard.usd", "Microsoft_ContainerRegistry_registries": "omniverse://localhost/MCE/3dIcons/container_registries_fix.usd", "Microsoft_RecoveryServices_vaults":"omniverse://localhost/MCE/3dIcons/recovery_service_vault_fix.usd", "Microsoft_DevTestLab_schedules": "omniverse://localhost/MCE/3dIcons/scene.usd", "User_Red":"omniverse://localhost/MCE/3dIcons/User_Red.usd", "User_Blue":"omniverse://localhost/MCE/3dIcons/User_Blue.usd", "User_Orange":"omniverse://localhost/MCE/3dIcons/User_Orange.usd", "User_Green":"omniverse://localhost/MCE/3dIcons/User_Green.usd", }
9,535
Python
81.206896
115
0.745254
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/object_info_model.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ObjectInfoModel"]

from pxr import Tf
from pxr import Usd
from pxr import UsdGeom

from omni.ui import scene as sc
import omni.usd

from .prim_utils import only_select_parent_prims

# The distance to raise above the top of the object's bounding box
TOP_OFFSET = 5


class ObjectInfoModel(sc.AbstractManipulatorModel):
    """
    The model tracks the position and info of the selected object.
    """

    class PositionItem(sc.AbstractManipulatorItem):
        """
        The Model Item represents the position. It doesn't contain anything
        because we take the position directly from USD when requesting.
        """

        def __init__(self):
            super().__init__()
            self.value = [0, 0, 0]

    def __init__(self):
        super().__init__()

        # Current selected prim and material
        self._current_paths = []
        self.positions = []

        self._stage_listener = None
        self.populate()

        # Track selection changes
        self.events = self._get_context().get_stage_event_stream()
        self.stage_event_delegate = self.events.create_subscription_to_pop(
            self.on_stage_event, name="Object Info Selection Update"
        )

    def on_stage_event(self, event):
        """Called by stage_event_stream. We only care about selection changes."""
        # NEW: if statement to only check when selection changed
        if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            self.populate()

    def destroy(self):
        self.events = None
        self.stage_event_delegate.unsubscribe()

    def populate(self):
        self._current_paths = []
        self.positions = []

        usd_context = self._get_context()
        stage = usd_context.get_stage()

        # Get selected prims
        usd_context = omni.usd.get_context()
        self._stage: Usd.Stage = usd_context.get_stage()
        self._selection = usd_context.get_selection()
        self._paths = self._selection.get_selected_prim_paths()

        # Selectively choose the paths
        self._paths = only_select_parent_prims(prim_paths=self._paths)

        # if len(self._current_paths) > 1:  # ONLY SHOW ON MULTISELECT!
        for path in self._paths:
            prim = stage.GetPrimAtPath(path)
            if not prim.IsValid():
                return
            for child in prim.GetChildren():
                if child.IsA(UsdGeom.Imageable):
                    if str(path).find("Collision") == -1:
                        if str(path).find("Baked") == -1:
                            if str(path).find("/materials") == -1:
                                self._current_paths.append(child.GetPath())
                                self.positions.append(ObjectInfoModel.PositionItem())
                                # Position is changed because new selected object has a different position
                                self._item_changed(self.positions[-1])
        # elif len(self._current_paths == 0):
        #     pass

    def _get_context(self):
        # Get the UsdContext we are attached to
        return omni.usd.get_context()

    def _notice_changed(self, notice: Usd.Notice, stage: Usd.Stage) -> None:
        """Called by Tf.Notice. Used when the current selected object changes in some way."""
        for p in notice.GetChangedInfoOnlyPaths():
            for i, watched_path in enumerate(self._current_paths):
                if str(watched_path) in str(p.GetPrimPath()):
                    self._item_changed(self.positions[i])

    def get_name(self, index):
        stage = self._get_context().get_stage()
        prim = stage.GetPrimAtPath(self._current_paths[index])
        return prim.GetCustomDataByKey('res_name')

    def get_num_prims(self):
        return len(self._current_paths)

    def get_position(self, index):
        """Returns position of currently selected object"""
        stage = self._get_context().get_stage()
        if not stage or not self._current_paths[index]:
            return [0, 0, 0]

        # Get position directly from USD
        prim = stage.GetPrimAtPath(self._current_paths[index])
        box_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), includedPurposes=[UsdGeom.Tokens.default_])
        bound = box_cache.ComputeWorldBound(prim)
        range = bound.ComputeAlignedBox()
        bboxMin = range.GetMin()
        bboxMax = range.GetMax()

        # Find the top center of the bounding box and add a small offset upward.
        position = [(bboxMin[0] + bboxMax[0]) * 0.5, bboxMax[1] + TOP_OFFSET, (bboxMin[2] + bboxMax[2]) * 0.5]
        return position
5,078
Python
35.021276
110
0.611461
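The ObjectInfoModel above exposes its tracked selection through get_num_prims(), get_name() and get_position(). A minimal usage sketch, assuming it runs inside the extension package in a Kit session with an open stage and an active selection (the print loop is illustrative only):

# Usage sketch for ObjectInfoModel; assumes an active omni.usd context with a selection.
from .object_info_model import ObjectInfoModel

model = ObjectInfoModel()      # subscribes to stage selection events on construction
model.populate()               # rebuild the tracked paths from the current selection

for i in range(model.get_num_prims()):
    name = model.get_name(i)          # reads the 'res_name' custom data set at prim creation
    position = model.get_position(i)  # top-center of the world bounding box plus TOP_OFFSET
    print(f"{name}: widget anchor at {position}")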
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/stage_manager.py
# This class is intended to manage the stage # Using the requested view and the loaded dataset, we need to translate data into Prims # There are 2 variables here. The prims you want to see, and the way you want to see them # the data gives resource counts by subscription, group, or location. # it also contains resource group data and data about all the raw resources # we need to put the resource prims in groups on the stage, floating islands ? # I want to try and build a floating island for each resource group, IE a plane that prims can rest on. # The islands will be 2d planes in 3d space, big enough to accomidate the resources in said group. # the more resources the bigger the island. #we can then postion the islands in novel ways for exploration # Model related # Python built-in from textwrap import fill import time from cgitb import text import os.path from unicodedata import name import carb import locale from pathlib import Path # external python lib import csv import itertools # USD imports from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf # omniverse import omni.client import omni.kit.app import omni.ui as ui import omni.usd import omni.kit.commands import shutil import os import asyncio import omni.kit.notification_manager as nm from .prim_utils import create_plane from .prim_utils import cleanup_prim_path from .prim_utils import get_font_size_from_length from .packer import Node, Block, Packer from omni.kit.window.file_importer import get_file_importer from omni.ui import color as cl #import utilities from .azure_resource_map import shape_usda_name from .data_manager import DataManager from .data_store import DataStore from .scatter_complex import distributePlanes #Import View Models from .group_aad import AADGrpView from .group_group import ResGrpView from .group_sub import SubGrpView from .group_location import LocGrpView from .group_type import TypeGrpView from .group_tag import TagGrpView CURRENT_PATH = Path(__file__).parent DATA_PATH = CURRENT_PATH.joinpath("temp") # The Stage Manager is responsible for drawing the stage based on the ViewType # It will start from scratch and create the Ground plane and groups on the plane # It will render the resources in each group on individual planes class StageManager(): def __init__(self): self._dataManager = DataManager.instance() # Get A Singleton instance self._dataStore = DataStore.instance() # Get A Singleton instance #self._dataManager.add_model_changed_callback(self.model_changed) self.stage_unit_per_meter = 1 #Get Composition Options from UI try: self._scale = self._dataStore._scale_model except: self._scale=1.0 try: self._use_packing_algo = self._dataStore._use_packing_algo except: self._use_packing_algo = False try: self._use_symmetric_planes = self._dataStore._use_symmetric_planes except: self._use_symmetric_planes = False try: self._last_view_type = self._dataStore._last_view_type except: self._last_view_type = "ByGroup" if self._last_view_type is None: self._last_view_type = "ByGroup" self._upAxis="Z" self._shapeUpAxis="Z" self.ActiveView = self.SetActiveView(self._last_view_type) def SetActiveView(self, viewType:str): #Set a subclass to handle the View Creation if viewType == "ByGroup": view = ResGrpView(viewPath="RGrps", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis, symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo) if viewType == "ByLocation": view = LocGrpView(viewPath="Locs", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis, 
symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo) if viewType == "ByType": view = TypeGrpView(viewPath="Types", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis, symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo) if viewType == "BySub": view = SubGrpView(viewPath="Subs", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis, symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo) if viewType == "ByTag": view = TagGrpView(viewPath="Tags", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis, symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo) return view # def model_changed(): # pass #Invoked from UI - Show the Stages based on the View. def ShowStage(self, viewType:str): #Reset view data self._dataStore._lcl_sizes = [] self._dataStore._lcl_groups = [] self._dataStore._lcl_resources = [] self.ActiveView = self.SetActiveView(viewType) #populate the stage self.ActiveView.initializeStage(self.stage_unit_per_meter) #Base Method self.ActiveView.calcGroupPlaneSizes() #Abstract Method self.ActiveView.calulateCosts() #Abstract Method transforms = self.getTransforms() #Cooredinates for the group planes #sort the groups to add largest first self._dataStore._lcl_groups.sort(key=lambda element: element['size'], reverse=True) self._dataStore._lcl_sizes.sort(reverse=True) asyncio.ensure_future(self.AddLightsToStage()) #Create the groups in an async loop grpCnt = len(self._dataStore._lcl_groups) if (grpCnt) >0 : asyncio.ensure_future(self.ActiveView.CreateGroups(transforms=transforms)) self.ActiveView.loadResources() #Abstract Method self.sendNotify("Stage loading complete: " + str(grpCnt) + " groups loaded.", nm.NotificationStatus.INFO) #Load the resources by group def LoadResources(self, viewType:str): self.ActiveView = self.SetActiveView(viewType) self.ActiveView.initializeStage(self.stage_unit_per_meter) #Base Method self.ActiveView.calcGroupPlaneSizes() #Abstract Method self.ActiveView.calulateCosts() #Abstract Method #View is already set, show resources for specific or all paths if self.ActiveView is None: self.ActiveView = self.SetActiveView(self._last_view_type) self.ActiveView.loadResources() #Abstract Method #Gets the x,y,z coordinates to place the grouping planes def getTransforms(self): if (self._dataStore._use_packing_algo): #Use Packer Algorithm to determine positioning transforms = [] blocks = [] if len(self._dataStore._lcl_sizes) >0: sorted_sizes = sorted(self._dataStore._lcl_sizes, reverse=True) for size in sorted_sizes: sz = (size*2) #double the size end to end blocks.append(Block((sz,sz))) pack = Packer() pack.fit(blocks) for block in blocks: if block.fit: fitX = block.fit.location[0] fitY = block.fit.location[1] fitZ = 0 transforms.append(Gf.Vec3f(fitX, fitY ,fitZ)) #print("size: {} loc: {},{}".format(str(block.size[0]), str(block.fit.location[0]), str(block.fit.location[1]))) else: print("not fit: {}".format(block.size[0])) return transforms else: #Use the scatter distribution method maxDims = (self._dataStore._options_count_models[0].as_float * self._dataStore._options_count_models[1].as_float * self._dataStore._options_count_models[2].as_float) grpCnt = len(self._dataStore._lcl_groups) if grpCnt > maxDims: self.sendNotify("Not enough dimensions for ..." 
+ str(grpCnt) + "res groups, Max Dims: " + str(maxDims), nm.NotificationStatus.WARNING) return if grpCnt >0: #Use Customized Scatter algorithm get coordinates for varying sized planes transforms = distributePlanes( UpAxis=self._upAxis, count=[m.as_int for m in self._dataStore._options_count_models], distance=[m.as_float for m in self._dataStore._options_dist_models], sizes=self._dataStore._lcl_sizes, randomization=[m.as_float for m in self._dataStore._options_random_models], seed=0, scaleFactor=self._dataStore._composition_scale_model.as_float) return transforms async def AddLightsToStage(self): stage = omni.usd.get_context().get_stage() try: if stage.GetPrimAtPath('/Environment/sky'): omni.kit.commands.execute('DeletePrimsCommand', paths=['/Environment/sky']) except: pass #ignore this await omni.kit.app.get_app().next_update_async() omni.kit.commands.execute('CreateDynamicSkyCommand', sky_url='http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Skies/Dynamic/NightSky.usd', sky_path='/Environment/sky') omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path('/Environment/sky.xformOp:rotateZYX'), value=Gf.Vec3f(90.0, 0.0, 0.0), prev=Gf.Vec3f(0.0, 0.0, 0.0)) def Select_Planes(self): if self.ActiveView is None: self.ActiveView = self.SetActiveView(self._last_view_type) self.ActiveView.selectGroupPrims() else: self.ActiveView.selectGroupPrims() def get_size(self, element): return element['size'] def ShowCosts(self): if self.ActiveView is None: self.ActiveView = self.SetActiveView(self._last_view_type) asyncio.ensure_future(self.ActiveView.showHideCosts()) # Set Color # next_shape.GetDisplayColorAttr().Set( # category_colors[int(cluster) % self.max_num_clusters]) def clicked_ok(self): pass def sendNotify(self, message:str, status:nm.NotificationStatus): # https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager# import omni.kit.notification_manager as nm ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok) nm.post_notification( message, hide_after_timeout=True, duration=5, status=status, button_infos=[] ) #log the vectors def log_transforms(self, vectors): for v in vectors: logdata = str(vectors[v][0]) + "," + str(vectors[v][1]) + "," + str(vectors[v][2]) print(logdata)
11,398
Python
36.49671
177
0.631953
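getTransforms() above chooses between a bin-packing layout and a scatter layout for the group planes. A stand-alone sketch of the packing branch, calling the Block/Packer API the same way stage_manager.py does (the group sizes here are hypothetical):

# Sketch of the bin-packing branch of StageManager.getTransforms().
# Block and Packer come from the extension's .packer module; the sizes are made up.
from pxr import Gf
from .packer import Block, Packer

group_sizes = [120, 80, 80, 40]                              # hypothetical plane sizes
blocks = [Block((s * 2, s * 2)) for s in sorted(group_sizes, reverse=True)]

packer = Packer()
packer.fit(blocks)

# One plane-center coordinate per group that fit; unplaced blocks are skipped.
transforms = [Gf.Vec3f(b.fit.location[0], b.fit.location[1], 0.0) for b in blocks if b.fit]
print(transforms)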
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/prim_utils.py
__all__ = ["create_plane", "get_font_size_from_length", "get_parent_child_prim_path", "create_and_place_prim", "log_transforms", "only_select_parent_prims"] import sys from tokenize import Double import omni.usd import omni.kit.commands import shutil import carb from pathlib import Path from pxr import Sdf from pxr import Gf, UsdGeom, UsdLux from .pillow_text import draw_text_on_image_at_position CURRENT_PATH = Path(__file__).parent RES_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data\\resources") async def create_and_place_prim(self, prim_type:str, prim_name:str, grp_name:str, sub_name:str, loc_name:str, cost:str, new_prim_path:str, shapeToRender:str, scale:float, position:Gf.Vec3f ): carb.log_info("Creating new prim: " + prim_type + " @ "+ new_prim_path + " shape: " + shapeToRender) stage = omni.usd.get_context().get_stage() # Create prim to add the reference to. try: prim = stage.DefinePrim(new_prim_path) prim.GetReferences().AddReference(shapeToRender) except: carb.log_error("Invalid prim path:" + str(new_prim_path)) return my_new_prim = stage.GetPrimAtPath(new_prim_path) my_new_prim.SetCustomDataByKey('res_type', prim_type) my_new_prim.SetCustomDataByKey('res_name', prim_name) my_new_prim.SetCustomDataByKey('res_sub', sub_name) my_new_prim.SetCustomDataByKey('res_grp', grp_name) my_new_prim.SetCustomDataByKey('res_loc', loc_name) my_new_prim.SetCustomDataByKey('res_cost', cost) #Default rotation rotation = Gf.Vec3f(0,0,0) translate = Gf.Vec3d(position[0], position[1], position[2]) #Are we still set to default? Change cube size and position if shapeToRender == "omniverse://localhost/MCE/3dIcons/scene.usd": scale = 3.0 position[2] = position[2] + 30 #Buffer the cube off the z #CUSTOM SHAPE OVERRIDES if prim_name.lower() == "observation_chair": scale =0.8 rotation = Gf.Vec3f(90,0,220) translate=Gf.Vec3d(position[0]+200, position[1]+200, position[2]) if prim_name.lower() == "leather_jacket": scale =0.25 rotation = Gf.Vec3f(90,0,0) translate=Gf.Vec3d(position[0]-20, position[1], position[2]-25) if prim_name.lower() == "coat_rack": scale =0.55 rotation = Gf.Vec3f(90,0,0) translate=Gf.Vec3d(position[0]-220, position[1]+210, position[2]+10) carb.log_info("Placing prim: " + shapeToRender + " | " + str(new_prim_path) + " @ " + "scl:" + str(scale) + " x:" + str(position[0]) + "," + " y:" + str(position[1]) + "," + " z:" + str(position[2])) api = UsdGeom.XformCommonAPI(my_new_prim) try: api.SetTranslate(translate) api.SetRotate(rotation,UsdGeom.XformCommonAPI.RotationOrderXYZ) api.SetScale(Gf.Vec3f(scale,scale,scale)) except: carb.log_error("Oops!", sys.exc_info()[0], "occurred.") #log the vectors def log_transforms(self, vectors): for v in vectors: logdata = str(vectors[v][0]) + "," + str(vectors[v][1]) + "," + str(vectors[v][2]) print(logdata) def draw_image(self, output_file:str, src_file:str, textToDraw:str, costToDraw:str): font = RES_PATH.joinpath("airstrike.ttf") font_size = get_font_size_from_length(len(textToDraw)) draw_text_on_image_at_position( input_image_path=src_file, output_image_path=output_file, textToDraw=str(textToDraw), costToDraw=str(costToDraw), x=180, y=1875, fillColor="White", font=font, fontSize=font_size ) #Creates a plane of a certain size in a specific location def create_plane(self,Path:str, Name :str, Size: int, Location: Gf.Vec3f, Color:Gf.Vec3f): stage_ref = omni.usd.get_context().get_stage() omni.kit.commands.execute('AddGroundPlaneCommand', stage=stage_ref, planePath=Path, axis="Z", size=Size, position=Location, color=Color) def 
cleanup_prim_path(self, Name: str): #print("cleanup: " + Name) nme = Name.replace("-", "_") nme = nme.replace(" ", "_") nme = nme.replace("/", "_") nme = nme.replace(".", "_") nme = nme.replace(":", "_") nme = nme.replace(";", "_") nme = nme.replace("(", "_") nme = nme.replace(")", "_") nme = nme.replace("[", "_") nme = nme.replace("]", "_") nme = nme.replace("#", "_") #if it starts with a number add a _ if nme[0].isnumeric(): nme = "_" + nme #dont start with a - if nme[0] == "-": nme = nme[1:len(nme[0])-1] #print("cleanup res: " + nme) return nme # Concats two Sdf.Paths and truncates he result to MAX_PATH_LENGTH def get_parent_child_prim_path(self, groupPath:Sdf.Path, resName:str): resName = cleanup_prim_path(self, resName) #prim_len = len(str(groupPath)) + len(resName) #if (prim_len) > 70: # diff = prim_len - 70 # trim = len(resName) - diff # resName = resName[:trim] try: shape_prim_path = Sdf.Path(groupPath.AppendPath(resName)) return shape_prim_path except: print("Oops!", sys.exc_info()[0], "occurred.") def only_select_parent_prims(prim_paths): paths = [] for path in prim_paths: if str(path).find("Collision") != -1: continue #skip paths with Collision in them if str(path).find("Baked") != -1: continue #skip paths with Baked in them parts = path.split("/") if parts[2] == "Looks": continue if parts[1] == "Environment": continue #Select the root object only. if len(parts) == 3: parentPath = "/" + parts[1] + "/" + parts[2] if len(parts) == 4: parentPath = "/" + parts[1] + "/" + parts[2] + "/" + parts[3] if len(parts) == 5: parentPath = "/" + parts[1] + "/" + parts[2] + "/" + parts[3] + "/" + parts[4] paths.append(parentPath) return paths def get_font_size_from_length(nameLength:int): if (nameLength < 10): font_size = 160 elif (nameLength < 15): font_size = 140 elif (nameLength < 20): font_size = 120 elif (nameLength < 30): font_size = 100 elif (nameLength < 50): font_size = 80 elif (nameLength < 60): font_size = 70 elif (nameLength < 70): font_size = 60 elif (nameLength < 80): font_size = 44 return font_size
6,658
Python
30.861244
156
0.580955
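cleanup_prim_path() and get_parent_child_prim_path() above turn arbitrary Azure resource names into legal USD prim paths. A self-contained sketch of the same sanitation idea that runs outside Kit (the regex stands in for the chain of str.replace calls; the example paths and names are hypothetical):

# Stand-alone sketch of the prim-path sanitation done by cleanup_prim_path().
import re
from pxr import Sdf

def sanitize(name: str) -> str:
    cleaned = re.sub(r"[-\s/.:;()\[\]#]", "_", name)   # same character set as cleanup_prim_path
    if cleaned[0].isnumeric():
        cleaned = "_" + cleaned                        # prim names may not start with a digit
    return cleaned

group_path = Sdf.Path("/World/RGrps/Grp_network_rg")   # hypothetical group plane path
print(group_path.AppendPath(sanitize("my-vm (eastus2) #1")))
# -> /World/RGrps/Grp_network_rg/my_vm__eastus2___1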
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/constant.py
from enum import Enum, IntEnum

import carb.settings

FONT_OFFSET_Y = carb.settings.get_settings().get("/app/font/offset_y") or 0


class FontSize:
    Normal = 14  # Default (In VIEW, 18 pixels in real, configured by /app/font/size)
    Large = 16  # real size = round(18*16/14) = 21
    XLarge = 18  # real size = round(18*18/14) = 23
    XXLarge = 20  # real size = round(18*20/14) = 26
    XXXLarge = 22  # real size = round(18*22/14) = 28
    Small = 12  # real size = round(18*12/14) = 15
    XSmall = 10  # real size = round(18*10/14) = 13
    XXSmall = 8  # real size = round(18*8/14) = 10
    XXXSmall = 6  # real size = round(18*6/14) = 8


class MouseKey(IntEnum):
    NONE = -1
    LEFT = 0
    RIGHT = 1
    MIDDLE = 2


class COLORS:
    CLR_0 = 0xFF080808
    CLR_1 = 0xFF181818
    CLR_2 = 0xFF282828
    CLR_3 = 0xFF383838
    CLR_4 = 0xFF484848
    CLR_5 = 0xFF585858
    CLR_6 = 0xFF686868
    CLR_7 = 0xFF787878
    CLR_8 = 0xFF888888
    CLR_9 = 0xFF989898
    CLR_A = 0xFFA8A8A8
    CLR_B = 0xFFB8B8B8
    CLR_C = 0xFFCCCCCC
    CLR_D = 0xFFE0E0E0
    CLR_E = 0xFFF4F4F4

    LIGHRT_GARY = 0xFF808080
    GRAY = 0xFF606060
    DARK_GRAY = 0xFF404040
    DARK_DARK = 0xFF202020
    TRANSPARENT = 0x0000000

    # Color for buttons selected/activated state in NvidiaLight.
    L_SELECTED = 0xFFCFCCBF
    # Color for buttons selected/activated state in NvidiaDark.
    D_SELECTED = 0xFF4F383F

    BLACK = 0xFF000000
    WHITE = 0xFFFFFFFF

    TEXT_LIGHT = CLR_D
    TEXT_DISABLED_LIGHT = 0xFFA0A0A0
    TEXT_DARK = CLR_C
    TEXT_DISABLED_DARK = 0xFF8B8A8A
    TEXT_SELECTED = 0xFFC5911A

    WIDGET_BACKGROUND_LIGHT = 0xFF535354
    WIDGET_BACKGROUND_DARK = 0xFF23211F
    BUTTON_BACKGROUND_LIGHT = 0xFFD6D6D6

    LINE_SEPARATOR = 0xFFACACAC
    LINE_SEPARATOR_THICK = 0xFF666666


class LightColors:
    Background = COLORS.WIDGET_BACKGROUND_LIGHT
    BackgroundSelected = COLORS.CLR_4
    BackgroundHovered = COLORS.CLR_4
    Text = COLORS.CLR_D
    TextDisabled = COLORS.TEXT_DISABLED_LIGHT
    TextSelected = COLORS.TEXT_SELECTED
    Button = COLORS.BUTTON_BACKGROUND_LIGHT
    ButtonHovered = COLORS.CLR_B
    ButtonPressed = COLORS.CLR_A
    ButtonSelected = 0xFFCFCCBF
    WindowBackground = COLORS.CLR_D


class DarkColors:
    Background = COLORS.WIDGET_BACKGROUND_DARK
    BackgroundSelected = 0xFF6E6E6E
    BackgroundHovered = 0xFF6E6E6E
    Text = COLORS.CLR_C
    TextDisabled = COLORS.TEXT_DISABLED_DARK
    TextSelected = COLORS.TEXT_SELECTED
    Button = COLORS.WIDGET_BACKGROUND_DARK
    ButtonHovered = 0xFF9E9E9E
    ButtonPressed = 0xFF787569
    ButtonSelected = 0xFF4F383F
    WindowBackground = 0xFF454545
2,654
Python
25.55
83
0.674077
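The FontSize and color classes above are plain constant holders intended for omni.ui style dictionaries. A minimal sketch of that usage (the "Label::header" selector name is made up; the color and font-size values come from constant.py):

# Sketch: feeding the constants above into an omni.ui style dictionary.
import omni.ui as ui
from .constant import COLORS, DarkColors, FontSize

dark_style = {
    "Window": {"background_color": DarkColors.WindowBackground},
    "Label::header": {"color": COLORS.TEXT_SELECTED, "font_size": FontSize.XXLarge},
    "Button": {
        "background_color": DarkColors.Button,
        "background_color:hovered": DarkColors.ButtonHovered,
        "background_color:pressed": DarkColors.ButtonPressed,
    },
}

window = ui.Window("Styled Window", width=300, height=200)
window.frame.style = dark_style
with window.frame:
    with ui.VStack():
        ui.Label("Meta Cloud Explorer", name="header", height=0)
        ui.Button("OK", height=30)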
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/azure_map_primitives.py
shape_usda_name = { "AAD": "omniverse://localhost/MCE/cube.usda", "Resource Group": "omniverse://localhost/MCE/cube.usda", "Storage account": "omniverse://localhost/MCE/cube.usda", "App Service": "omniverse://localhost/MCE/cube.usda", "Subscription": "omniverse://localhost/MCE/cube.usda", "API Connection" : "omniverse://localhost/MCE/cube.usda", "API Management service" : "omniverse://localhost/MCE/cube.usda", "App Configuration" : "omniverse://localhost/MCE/cube.usda", "App Service plan" : "omniverse://localhost/MCE/cube.usda", "App Service" : "omniverse://localhost/MCE/cube.usda", "Application Insights" : "omniverse://localhost/MCE/cube.usda", "Application gateway" : "omniverse://localhost/MCE/cube.usda", "Automation Account" : "omniverse://localhost/MCE/cube.usda", "Availability test": "omniverse://localhost/MCE/cube.usda", "Azure Cosmos DB API for MongoDB account" : "omniverse://localhost/MCE/cube.usda", "Azure Cosmos DB account" : "omniverse://localhost/MCE/cube.usda", "Azure Data Explorer Cluster" : "omniverse://localhost/MCE/cube.usda", "Azure DevOps organization" : "omniverse://localhost/MCE/cube.usda", "Azure Machine Learning" : "omniverse://localhost/MCE/cube.usda", "Azure Workbook" : "omniverse://localhost/MCE/cube.usda", "Bastion" : "omniverse://localhost/MCE/cube.usda", "Cognitive Service" : "omniverse://localhost/MCE/cube.usda", "Container registry" : "omniverse://localhost/MCE/cube.usda", "Data Lake Analytics" : "omniverse://localhost/MCE/cube.usda", "Data Lake Storage Gen1" : "omniverse://localhost/MCE/cube.usda", "Data factory (V2)" : "omniverse://localhost/MCE/cube.usda", "Disk" : "omniverse://localhost/MCE/cube.usda", "Event Grid System Topic" : "omniverse://localhost/MCE/cube.usda", "Event Hubs Namespace" : "omniverse://localhost/MCE/cube.usda", "Firewall Policy" : "omniverse://localhost/MCE/cube.usda", "Firewall" : "omniverse://localhost/MCE/cube.usda", "Function App" : "omniverse://localhost/MCE/cube.usda", "Image" : "omniverse://localhost/MCE/cube.usda", "Key vault" : "omniverse://localhost/MCE/cube.usda", "Kubernetes service" : "omniverse://localhost/MCE/cube.usda", "Language understanding" : "omniverse://localhost/MCE/cube.usda", "Load balancer" : "omniverse://localhost/MCE/cube.usda", "Log Analytics query pack" : "omniverse://localhost/MCE/cube.usda", "Log Analytics workspace" : "omniverse://localhost/MCE/cube.usda", "Logic App (Standard)" : "omniverse://localhost/MCE/cube.usda", "Logic app" : "omniverse://localhost/MCE/cube.usda", "Logic apps custom connector" : "omniverse://localhost/MCE/cube.usda", "Managed Identity" : "omniverse://localhost/MCE/cube.usda", "Network Interface" : "omniverse://localhost/MCE/cube.usda", "Network Watcher" : "omniverse://localhost/MCE/cube.usda", "Network security group" : "omniverse://localhost/MCE/cube.usda", "Power BI Embedded" : "omniverse://localhost/MCE/cube.usda", "Private DNS zone" : "omniverse://localhost/MCE/cube.usda", "Private endpoint" : "omniverse://localhost/MCE/cube.usda", "Public IP address" : "omniverse://localhost/MCE/cube.usda", "Recovery Services vault" : "omniverse://localhost/MCE/cube.usda", "Restore Point Collection" : "omniverse://localhost/MCE/cube.usda", "Runbook" : "omniverse://localhost/MCE/cube.usda", "SQL database" : "omniverse://localhost/MCE/cube.usda", "SQL elastic pool" : "omniverse://localhost/MCE/cube.usda", "SQL server" : "omniverse://localhost/MCE/cube.usda", "SQL virtual machine" : "omniverse://localhost/MCE/cube.usda", "Search service" : "omniverse://localhost/MCE/cube.usda", "Service Bus 
Namespace" : "omniverse://localhost/MCE/cube.usda", "Service Fabric cluster" : "omniverse://localhost/MCE/cube.usda", "Shared dashboard" : "omniverse://localhost/MCE/cube.usda", "Snapshot" : "omniverse://localhost/MCE/cube.usda", "Solution" : "omniverse://localhost/MCE/cube.usda", "Storage account" : "omniverse://localhost/MCE/cube.usda", "Traffic Manager profile" : "omniverse://localhost/MCE/cube.usda", "Virtual machine scale set" : "omniverse://localhost/MCE/cube.usda", "Virtual machine" : "omniverse://localhost/MCE/cube.usda", "Virtual network" : "omniverse://localhost/MCE/cube.usda", "Web App Bot" : "omniverse://localhost/MCE/cube.usda", }
4,482
Python
58.773333
86
0.682954
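This map intentionally points every resource type at the same cube.usda placeholder; the richer per-service mapping lives in azure_resource_map.py. A small sketch of resolving a shape with a safe fallback (DEFAULT_SHAPE is an assumed constant, not something defined in the extension):

# Sketch: resolving an Azure resource type to a USD reference with a fallback.
from .azure_map_primitives import shape_usda_name

DEFAULT_SHAPE = "omniverse://localhost/MCE/cube.usda"   # hypothetical fallback constant

def shape_for(resource_type: str) -> str:
    return shape_usda_name.get(resource_type, DEFAULT_SHAPE)

print(shape_for("Virtual machine"))   # known key
print(shape_for("Chaos Studio"))      # unknown key -> falls back to the placeholder cube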
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/views.py
# import from omniverse from ctypes import alignment from omni.ui.workspace_utils import TOP # import from other extension py from .combo_box_model import ComboBoxModel from .style_button import button_styles from .style_meta import meta_window_style, get_gradient_color, build_gradient_image from .style_meta import cl_combobox_background, cls_temperature_gradient, cls_color_gradient, cls_tint_gradient, cls_grey_gradient, cls_button_gradient from .data_manager import DataManager from .data_store import DataStore from .button import SimpleImageButton import sys import asyncio import webbrowser #from turtle import width import omni.ext import omni.ui as ui from omni.ui import color as cl import os import carb import omni.kit.commands import omni.kit.pipapi from pxr import Sdf, Usd, Gf, UsdGeom import omni from pathlib import Path import omni.kit.notification_manager as nm from .omni_utils import get_selection from .combo_box_model import ComboBoxModel from .omni_utils import duplicate_prims from .stage_manager import StageManager from .import_fbx import convert_asset_to_usd from .prim_utils import create_plane import random LABEL_WIDTH = 120 WINDOW_NAME = "Meta Cloud Explorer" SPACING = 4 CURRENT_PATH = Path(__file__).parent DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data\\resources") class MainView(ui.Window): """The class that represents the window""" def __init__(self, title: str = None, menu_path:str = "", delegate=None, **kwargs): super().__init__(title, width=640, height=480, **kwargs) self.__label_width = LABEL_WIDTH self._viewport_scene = None self.objModel = kwargs["objectModel"] self.widModel = kwargs["widgetModel"] self._menu_path = menu_path #Helper Class instances self._stageManager = StageManager() self._dataManager = DataManager.instance() self._dataStore = DataStore.instance() #Get notified when visibility changes self.set_visibility_changed_fn(self._on_visibility_changed) #Get notifed when the datamodel changes self._dataManager.add_model_changed_callback(self.model_changed) # Apply the style to all the widgets of this window self.frame.style = meta_window_style # Set the function that is called to build widgets when the window is visible self.frame.set_build_fn(self._build_fn) def __del__(self): self.destroy() def destroy(self): super().destroy() if self._dataManager: self._dataManager.destroy() if self._dataStore: self._dataStore = None if self._stageManager: self._stageManager = None self.objModel= None self.widModel= None if self._viewport_scene: # Empty the SceneView of any elements it may have self._viewport_scene = None # Be a good citizen, and un-register the SceneView from Viewport updates if self._viewport_window: self._viewport_window.viewport_api.remove_scene_view(self._scene_view) # Remove our references to these objects self._viewport_window = None self._scene_view = None self._menu_path = None def on_shutdown(self): self._win = None def show(self): self.visible = True self.focus() def hide(self): self.visible = False def _on_visibility_changed(self, visible): omni.kit.ui.get_editor_menu().set_value(self._menu_path, visible) @property def label_width(self): """The width of the attribute label""" return self.__label_width @label_width.setter def label_width(self, value): """The width of the attribute label""" self.__label_width = value self.frame.rebuild() #___________________________________________________________________________________________________ # Function Definitions 
#___________________________________________________________________________________________________ def on_docs(self): webbrowser.open_new("https://github.com/USDSync/MetaCloudExplorer/wiki") def on_code(self): webbrowser.open_new("http://metacloudexplorer.com") def on_help(self): webbrowser.open_new("https://github.com/USDSync/MetaCloudExplorer/issues") #Callback invoked when data model changes def model_changed(self): carb.log_info("Model changed!") if (hasattr(self, "_grpLbl")): self._grpLbl.text = "GROUPS: " + str(len(self._dataStore._groups)) if (hasattr(self, "_resLbl")): self._resLbl.text = "RESOURCES: " + str(len(self._dataStore._resources)) #Set defaults from quickstarts def set_defaults(self, defType:str): if defType == "tower": self.sendNotify("MCE: Tower defaults set... Select a VIEW", nm.NotificationStatus.INFO) self._dataStore._symmetric_planes_model.set_value(True) self._dataStore._packing_algo_model.set_value(False) self._dataStore._options_count_models[0].set_value(2) self._dataStore._options_count_models[1].set_value(2) self._dataStore._options_count_models[2].set_value(60) self._dataStore._options_dist_models[0].set_value(500.0) self._dataStore._options_dist_models[1].set_value(500.0) self._dataStore._options_dist_models[2].set_value(250.0) self._dataStore._options_random_models[0].set_value(1) self._dataStore._options_random_models[1].set_value(1) self._dataStore._options_random_models[2].set_value(1) if defType == "symmetric": self.sendNotify("MCE: Symmetric defaults set... Select a VIEW", nm.NotificationStatus.INFO) self._dataStore._symmetric_planes_model.set_value(True) self._dataStore._packing_algo_model.set_value(False) self._dataStore._options_count_models[0].set_value(10) self._dataStore._options_count_models[1].set_value(10) self._dataStore._options_count_models[2].set_value(40) self._dataStore._options_dist_models[0].set_value(500.0) self._dataStore._options_dist_models[1].set_value(500.0) self._dataStore._options_dist_models[2].set_value(250.0) self._dataStore._options_random_models[0].set_value(1) self._dataStore._options_random_models[1].set_value(1) self._dataStore._options_random_models[2].set_value(1) if defType == "islands": self.sendNotify("MCE: Island defaults set... Select a VIEW", nm.NotificationStatus.INFO) self._dataStore._symmetric_planes_model.set_value(False) self._dataStore._packing_algo_model.set_value(False) self._dataStore._options_count_models[0].set_value(20) self._dataStore._options_count_models[1].set_value(4) self._dataStore._options_count_models[2].set_value(4) self._dataStore._options_dist_models[0].set_value(500.0) self._dataStore._options_dist_models[1].set_value(500.0) self._dataStore._options_dist_models[2].set_value(250.0) self._dataStore._options_random_models[0].set_value(1) self._dataStore._options_random_models[1].set_value(1) self._dataStore._options_random_models[2].set_value(1) if defType == "packer": self.sendNotify("MCE: Packer algo enabled... 
Select a VIEW", nm.NotificationStatus.INFO) self._dataStore._symmetric_planes_model.set_value(False) self._dataStore._packing_algo_model.set_value(True) self._dataStore._options_count_models[0].set_value(4) self._dataStore._options_count_models[1].set_value(4) self._dataStore._options_count_models[2].set_value(20) self._dataStore._options_dist_models[0].set_value(500.0) self._dataStore._options_dist_models[1].set_value(500.0) self._dataStore._options_dist_models[2].set_value(250.0) self._dataStore._options_random_models[0].set_value(1) self._dataStore._options_random_models[1].set_value(1) self._dataStore._options_random_models[2].set_value(1) def show_info_objects(self): self.model.populate() #Load a fresh stage def load_stage(self, viewType: str): self._dataStore._last_view_type = viewType self._dataStore.Save_Config_Data() #Block and clear stage asyncio.ensure_future(self.clear_stage()) self._stageManager.ShowStage(viewType) #load the resource onto the stage def load_resources(self): self._stageManager.LoadResources(self._dataStore._last_view_type) #change the background shaders to reflect costs def showHideCosts(self): self._stageManager.ShowCosts() # Clear the stage async def clear_stage(self): try: stage = omni.usd.get_context().get_stage() root_prim = stage.GetPrimAtPath("/World") if (root_prim.IsValid()): stage.RemovePrim("/World") ground_prim = stage.GetPrimAtPath('/GroundPlane') if (ground_prim.IsValid()): stage.RemovePrim('/GroundPlane') ground_prim = stage.GetPrimAtPath('/RGrp') if (ground_prim.IsValid()): stage.RemovePrim('/RGrp') ground_prim = stage.GetPrimAtPath('/Loc') if (ground_prim.IsValid()): stage.RemovePrim('/Loc') ground_prim = stage.GetPrimAtPath('/AAD') if (ground_prim.IsValid()): stage.RemovePrim('/AAD') ground_prim = stage.GetPrimAtPath('/Subs') if (ground_prim.IsValid()): stage.RemovePrim('/Subs') ground_prim = stage.GetPrimAtPath('/Type') if (ground_prim.IsValid()): stage.RemovePrim('/Type') ground_prim = stage.GetPrimAtPath('/Cost') if (ground_prim.IsValid()): stage.RemovePrim('/Cost') ground_prim = stage.GetPrimAtPath('/Looks') if (ground_prim.IsValid()): stage.RemovePrim('/Looks') ground_prim = stage.GetPrimAtPath('/Tag') if (ground_prim.IsValid()): stage.RemovePrim('/Tag') if stage.GetPrimAtPath('/Environment/sky'): omni.kit.commands.execute('DeletePrimsCommand',paths=['/Environment/sky']) except: pass #ignore failure #___________________________________________________________________________________________________ # Window UI Definitions #___________________________________________________________________________________________________ def _build_fn(self): """The method that is called to build all the UI once the window is visible.""" with ui.ScrollingFrame(): with ui.VStack(height=0): self._build_new_header() self._build_image_presets() self._build_options() self._build_connection() self._build_import() self._build_help() #self.buildSliderTest() # slider = ui.FloatSlider(min=1.0, max=150.0) # slider.model.as_float = 10.0 # label = ui.Label("Omniverse", style={"color": ui.color(0), "font_size": 7.0}) #Pieces of UI Elements def _build_new_header(self): """Build the widgets of the "Source" group""" #with ui.ZStack(): #Background #ui.Image(style={'image_url': "omniverse://localhost/Resources/images/meta_cloud_explorer_800.png", 'fill_policy': ui.FillPolicy.PRESERVE_ASPECT_CROP, 'alignment': ui.Alignment.CENTER_BOTTOM, 'fill_policy':ui.FillPolicy.PRESERVE_ASPECT_CROP}) #Foreground with ui.VStack(): with ui.HStack(): with ui.VStack(): with ui.HStack(): with 
ui.VStack(): ui.Label("Meta Cloud Explorer", style={"color": cl("#A4B7FD"), "font_size":20}, alignment=ui.Alignment.LEFT, height=0) ui.Label("Cloud Infrastructure Scene Authoring Extension", style={"color": cl("#878683"), "font_size":16}, alignment=ui.Alignment.LEFT, height=0) with ui.VStack(): ui.Spacer(height=15) self._grpLbl = ui.Label("GROUPS: " + str(len(self._dataStore._groups)),style={"color": cl("#2069e0"), "font_size":18 }, alignment=ui.Alignment.RIGHT, height=0) self._resLbl = ui.Label("RESOURCES: " + str(len(self._dataStore._resources)), style={"color": cl("#2069e0"), "font_size":18}, alignment=ui.Alignment.RIGHT, height=0) ui.Line(style={"color": cl("#66b3ff")}, height=20) with ui.VStack(height=0, spacing=SPACING): #ui.Spacer(height=80) with ui.HStack(): ui.Button("< GROUPS >", clicked_fn=lambda: self.load_stage("ByGroup"), name="subs", height=35, style={"color": cl("#bebebe"), "font_size":20 }) ui.Button("< TYPES >", clicked_fn=lambda: self.load_stage("ByType"), name="subs",height=35, style={"color": cl("#bebebe"), "font_size":20 }) with ui.VStack(height=0, spacing=SPACING): #ui.Spacer(height=120) with ui.HStack(): ui.Button("< LOCATIONS >", clicked_fn=lambda: self.load_stage("ByLocation"), name="subs", height=35, style={"color": cl("#bebebe"), "font_size":20 }) ui.Button("< SUBSCRIPTIONS >", clicked_fn=lambda: self.load_stage("BySub"), name="subs", height=35, style={"color": cl("#bebebe"), "font_size":20 }) with ui.VStack(height=0, spacing=SPACING): #ui.Spacer(height=120) with ui.HStack(): ui.Button("Clear Stage", clicked_fn=lambda: asyncio.ensure_future(self.clear_stage()), name="clr", height=35) ui.Button("Show/Hide Costs", clicked_fn=lambda: self.showHideCosts(),name="subs", height=35) #ui.Button("Show Object Info", clicked_fn=lambda: self.show_info_objects(),name="clr", height=35) ui.Button("Select All Groups", clicked_fn=lambda: self.select_planes(),name="clr", height=35) # with ui.HStack(): # ui.Button("Network View", clicked_fn=lambda: self.load_stage("ByNetwork"), height=15) # ui.Button("Cost View", clicked_fn=lambda: self.load_stage("ByCost"), height=15) # ui.Button("Template View", clicked_fn=lambda: self.load_stage("Template"), height=15) def _build_import(self): with ui.CollapsableFrame("Import Offline Files", name="group", collapsed=True, style={"color": cl("#2069e0"), "font_size":20}): with ui.VStack(style={"color": 0xFFFFFFFF, "font_size":16}): ui.Label("Resource Groups file path:", height=10, width=120) with ui.HStack(): self._rg_data_import_field = ui.StringField(height=15) self._rg_data_import_field.enabled = True self._rg_data_import_field.model.set_value(str(self._dataStore._rg_csv_file_path)) self._dataStore._rg_csv_field_model = self._rg_data_import_field.model ui.Button("Load", width=40, clicked_fn=lambda: self._dataManager.select_file("rg")) ui.Label("All Resources file path:", height=10, width=120) with ui.HStack(): self._rs_data_import_field = ui.StringField(height=15) self._rs_data_import_field.enabled = True self._rs_data_import_field.model.set_value(str(self._dataStore._rs_csv_file_path)) self._dataStore._rs_csv_field_model = self._rs_data_import_field.model ui.Button("Load", width=40, clicked_fn=lambda: self._dataManager.select_file("res")) with ui.HStack(): ui.Button("Clear imported Data", clicked_fn=lambda: self._dataManager.wipe_data()) ui.Button("Import Selected Files", clicked_fn=lambda: self._dataManager.load_csv_files()) with ui.HStack(): ui.Button("Load Small Company", clicked_fn=lambda: self._dataManager.load_small_company()) 
ui.Button("Load Large Company", clicked_fn=lambda: self._dataManager.load_large_company()) ui.Button("Load Shapes Library", clicked_fn=lambda: self._dataManager.load_sample_resources()) def _build_connection(self): def _on_value_changed(field:str, value): if field == "tenant": self._dataStore._azure_tenant_id = value if field == "client": self._dataStore._azure_client_id = value if field == "subid": self._dataStore._azure_subscription_id = value if field == "secret": self._dataStore._azure_client_secret = value # def setText(label, text): # '''Sets text on the label''' # # This function exists because lambda cannot contain assignment # label.text = f"You wrote '{text}'" with ui.CollapsableFrame("Cloud API Connection", name="group", collapsed=True, style={"color": cl("#2069e0"), "font_size":20}): with ui.VStack(style={"color": 0xFFFFFFFF, "font_size":16}): # with ui.CollapsableFrame("Azure API Connection", name="group", collapsed=True): # with ui.VStack(): ui.Label("Tenant Id",width=self.label_width) self._tenant_import_field = ui.StringField(height=15) self._tenant_import_field.enabled = True self._tenant_import_field.model.set_value(str(self._dataStore._azure_tenant_id)) self._tenant_import_field.model.add_value_changed_fn(lambda m: _on_value_changed("tenant", m.get_value_as_string())) ui.Label("Client Id",width=self.label_width) self._client_import_field = ui.StringField(height=15) self._client_import_field.enabled = True self._client_import_field.model.set_value(str(self._dataStore._azure_client_id)) self._client_import_field.model.add_value_changed_fn(lambda m: _on_value_changed("client", m.get_value_as_string())) ui.Label("Subscription Id",width=self.label_width) self._subscription_id_field = ui.StringField(height=15) self._subscription_id_field.enabled = True self._subscription_id_field.model.set_value(str(self._dataStore._azure_subscription_id)) self._subscription_id_field.model.add_value_changed_fn(lambda m: _on_value_changed("subid", m.get_value_as_string())) ui.Label("Client Secret",width=self.label_width) self._client_secret_field = ui.StringField(height=15, password_mode=True) self._client_secret_field.enabled = True self._client_secret_field.model.set_value(str(self._dataStore._azure_client_secret)) self._client_secret_field.model.add_value_changed_fn(lambda m: _on_value_changed("secret", m.get_value_as_string())) ui.Button("Connect to Azure", clicked_fn=lambda: self._dataManager.load_from_api()) def _build_axis(self, axis_id, axis_name): """Build the widgets of the "X" or "Y" or "Z" group""" with ui.CollapsableFrame(axis_name, name="group", collapsed=True): with ui.VStack(height=0, spacing=SPACING): with ui.HStack(): ui.Label("Group Count", name="attribute_name", width=self.label_width) ui.IntDrag(model=self._dataStore._options_count_models[axis_id], min=1, max=500) with ui.HStack(): ui.Label("Distance", name="attribute_name", width=self.label_width) ui.FloatDrag(self._dataStore._options_dist_models[axis_id], min=250, max=5000) with ui.HStack(): ui.Label("Randomness", name="attribute_name", width=self.label_width) ui.FloatDrag(self._dataStore._options_random_models[axis_id], min=1.0, max=10.0) def _build_image_presets(self): def _on_clicked(self, source): self.set_defaults(source) #add selection rectangle with ui.CollapsableFrame("Quickstarts", name="group", collapsed=True, style={"color":cl("#2069e0"), "font_size":20}): with ui.VStack(style={"color": 0xFFFFFFFF}): with ui.HStack(style={}): with ui.VStack(): ui.Label("TOWER", name="attribute_name", width=self.label_width) 
SimpleImageButton(image="omniverse://localhost/MCE/images/tower.png", size=125, name="twr_btn", clicked_fn=lambda: _on_clicked(self, source="tower")) with ui.VStack(): ui.Label("ISLANDS", name="attribute_name", width=self.label_width) SimpleImageButton(image="omniverse://localhost/MCE/images/islands.png", size=125, name="isl_btn", clicked_fn=lambda: _on_clicked(self, source="islands")) with ui.VStack(): ui.Label("SYMMETRIC", name="attribute_name", width=self.label_width) SimpleImageButton(image="omniverse://localhost/MCE/images/Symmetric.png", size=125, name="sym_btn", clicked_fn=lambda: _on_clicked(self, source="symmetric")) with ui.VStack(): ui.Label("BIN PACKER", name="attribute_name", width=self.label_width) SimpleImageButton(image="omniverse://localhost/MCE/images/packer.png", size=125, name="row_btn",clicked_fn=lambda: _on_clicked(self, source="packer")) def _build_image_options(self): with ui.CollapsableFrame("Group Images", name="group", collapsed=True): with ui.VStack(height=0, spacing=SPACING): with ui.HStack(): ui.Label("BG Low Cost", name="attribute_name", width=self.label_width) self._bgl_data_import_field = ui.StringField(height=15) self._bgl_data_import_field.enabled = True self._bgl_data_import_field.model.set_value(str(self._dataStore._bgl_file_path)) self._dataStore._bgl_field_model = self._bgl_data_import_field.model ui.Button("Load", width=40, clicked_fn=lambda: self._dataManager.select_file("bgl")) with ui.HStack(): ui.Label("Bg Mid Cost", name="attribute_name", width=self.label_width) self._bgm_data_import_field = ui.StringField(height=15) self._bgm_data_import_field.enabled = True self._bgm_data_import_field.model.set_value(str(self._dataStore._bgm_file_path)) self._dataStore._bgm_field_model = self._bgm_data_import_field.model ui.Button("Load", width=40, clicked_fn=lambda: self._dataManager.select_file("bgm")) with ui.HStack(): ui.Label("Bg High Cost", name="attribute_name", width=self.label_width) self._bgh_data_import_field = ui.StringField(height=15) self._bgh_data_import_field.enabled = True self._bgh_data_import_field.model.set_value(str(self._dataStore._bgh_file_path)) self._dataStore._bgh_field_model = self._bgh_data_import_field.model ui.Button("Load", width=40, clicked_fn=lambda: self._dataManager.select_file("bgh")) def _build_options(self, default_value=0, min=0, max=1): def _on_value_changed_bp(model): self._dataStore._use_packing_algo = model.as_bool def _on_value_changed_sg(model): self._dataStore._use_symmetric_planes = model.as_bool def _on_value_changed_wd(model): self._dataStore._show_info_widgets = model.as_bool with ui.CollapsableFrame("Scene Composition Options", name="group", collapsed=True, style={"color": cl("#2069e0"), "font_size":20}): with ui.VStack(height=0, spacing=SPACING, style={"color": 0xFFFFFFFF, "font_size":16}): with ui.HStack(): #self._dataStore._composition_scale_model = self._build_gradient_float_slider("Scale Factor", default_value=10, min=1, max=100) ui.Label("Object Scale", name="attribute_name", width=self.label_width, min=1, max=100) ui.FloatDrag(self._dataStore._composition_scale_model, min=1, max=100) self._dataStore._composition_scale_model.set_value(self._dataStore._scale_model) with ui.HStack(): ui.Label("Use Symmetric groups?", name="attribute_name", width=self.label_width) cb1 = ui.CheckBox(self._dataStore._symmetric_planes_model) cb1.model.add_value_changed_fn(lambda model: _on_value_changed_sg(model)) with ui.HStack(): ui.Label("Use Bin Packing?", name="attribute_name", width=self.label_width) cb2 = 
ui.CheckBox(self._dataStore._packing_algo_model) cb2.model.add_value_changed_fn(lambda model: _on_value_changed_bp(model)) with ui.HStack(): ui.Label("Show Info UI on select?", name="attribute_name", width=self.label_width) cb3 = ui.CheckBox(self._dataStore._show_info_widgets_model) cb3.model.add_value_changed_fn(lambda model: _on_value_changed_wd(model)) self._build_image_options() self._build_axis(0, "Groups on X Axis") self._build_axis(1, "Groups on Y Axis") self._build_axis(2, "Groups on Z Axis") def _build_help(self): with ui.CollapsableFrame("About", name="group", collapsed=True, style={"color": cl("#2069e0"), "font_size":20}): with ui.VStack(height=0, spacing=SPACING, style={"color": 0xFFFFFFFF, "font_size":16}): with ui.HStack(): with ui.VStack(): with ui.HStack(): ui.Label("Meta Cloud Explorer (MCE)", clicked_fn=lambda: self.on_docs(), height=15) ui.Label("v1.0.0", clicked_fn=lambda: self.on_docs(), height=15) with ui.HStack(): with ui.VStack(): with ui.HStack(): ui.Label("The true power of the Metaverse is to gain new insights to existing problems by experiencing things in a different way, a simple change in perspective!", style={"color":0xFF000000}, elided_text=True, ) with ui.HStack(): with ui.VStack(): with ui.HStack(): ui.Line(style={"color": cl("#bebebe")}, height=20) ui.Button("Docs", clicked_fn=lambda: self.on_docs(), height=15) ui.Button("Code", clicked_fn=lambda: self.on_code(), height=15) ui.Button("Help", clicked_fn=lambda: self.on_help(), height=15) def __build_value_changed_widget(self): with ui.VStack(width=20): ui.Spacer(height=3) rect_changed = ui.Rectangle(name="attribute_changed", width=15, height=15, visible= False) ui.Spacer(height=4) with ui.HStack(): ui.Spacer(width=3) rect_default = ui.Rectangle(name="attribute_default", width=5, height=5, visible= True) return rect_changed, rect_default def _build_gradient_float_slider(self, label_name, default_value=0, min=0, max=1): def _on_value_changed(model, rect_changed, rect_defaul): if model.as_float == default_value: rect_changed.visible = False rect_defaul.visible = True else: rect_changed.visible = True rect_defaul.visible = False def _restore_default(slider): slider.model.set_value(default_value) with ui.HStack(): ui.Label(label_name, name=f"attribute_name", width=self.label_width) with ui.ZStack(): button_background_gradient = build_gradient_image(cls_button_gradient, 22, "button_background_gradient") with ui.VStack(): ui.Spacer(height=1.5) with ui.HStack(width=200): slider = ui.FloatSlider(name="float_slider", height=0, min=min, max=max) slider.model.set_value(default_value) ui.Spacer(width=1.5) ui.Spacer(width=4) rect_changed, rect_default = self.__build_value_changed_widget() # switch the visibility of the rect_changed and rect_default to indicate value changes slider.model.add_value_changed_fn(lambda model: _on_value_changed(model, rect_changed, rect_default)) # add call back to click the rect_changed to restore the default value rect_changed.set_mouse_pressed_fn(lambda x, y, b, m: _restore_default(slider)) return button_background_gradient def sendNotify(self, message:str, status:nm.NotificationStatus): # https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager# import omni.kit.notification_manager as nm ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok) nm.post_notification( message, hide_after_timeout=True, duration=5, status=status, button_infos=[] ) def clicked_ok(self): pass def 
buildSliderTest(self): style = { "Button": {"stack_direction": ui.Direction.TOP_TO_BOTTOM}, "Button.Image": { "color": 0xFFFFCC99, "image_url": "resources/icons/Learn_128.png", "alignment": ui.Alignment.CENTER, }, "Button.Label": {"alignment": ui.Alignment.CENTER}, } def layout(model, button, padding, style=style): padding = "padding" if padding else "margin" style["Button"][padding] = model.get_value_as_float() button.set_style(style) def spacing(model, button): button.spacing = model.get_value_as_float() button = ui.Button("Label", style=style, width=64, height=64) with ui.HStack(width=ui.Percent(50)): ui.Label("padding", name="text") model = ui.FloatSlider(min=0, max=500).model model.add_value_changed_fn(lambda m, b=button: layout(m, b, 1)) with ui.HStack(width=ui.Percent(50)): ui.Label("margin", name="text") model = ui.FloatSlider(min=0, max=500).model model.add_value_changed_fn(lambda m, b=button: layout(m, b, 0)) with ui.HStack(width=ui.Percent(50)): ui.Label("Button.spacing", name="text") model = ui.FloatSlider(min=0, max=50).model model.add_value_changed_fn(lambda m, b=button: spacing(m, b))
32,845
Python
49.845201
254
0.565505
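Both MainView.sendNotify() above and StageManager.sendNotify() wrap omni.kit.notification_manager in the same way. A stripped-down sketch of that toast-notification call, runnable on its own inside Kit:

# Sketch: the notification pattern shared by MainView and StageManager.
import omni.kit.notification_manager as nm

def notify(message: str, status=nm.NotificationStatus.INFO):
    nm.post_notification(
        message,
        hide_after_timeout=True,   # auto-dismiss after `duration` seconds
        duration=5,
        status=status,
    )

notify("MCE: Tower defaults set... Select a VIEW")
notify("Stage loading failed", nm.NotificationStatus.WARNING)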
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/azure_resource_map_backup.py
shape_usda_name = { "AAD":"omniverse://localhost/MCE/3dIcons/AzureAAD_1.1.usd", "Resource_Group":"omniverse://localhost/MCE/3dIcons/Resource_Groups_2.0.usd", "Storage_account":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2_8.usd", "App_Service":"omniverse://localhost/MCE/3dIcons/AppServices_1_2.usd", "Subscription":"omniverse://localhost/MCE/3dIcons/Subscriptions_1_3.usd", "API_Connection":"omniverse://localhost/MCE/3dIcons/API_Connection.usd", "API_Management_service":"omniverse://localhost/MCE/3dIcons/API-management-services.usd", "App_Configuration":"omniverse://localhost/MCE/3dIcons/App-Configuration.usd", "App_Service_plan":"omniverse://localhost/MCE/3dIcons/cube.usda", "App_Service":"omniverse://localhost/MCE/3dIcons/AppServices_1_2.usd", "Application_Insights":"omniverse://localhost/MCE/3dIcons/cube.usda", "Application_gateway":"omniverse://localhost/MCE/3dIcons/Application_Gateway.usd", "Automation_Account":"omniverse://localhost/MCE/3dIcons/automation-accounts.usd", "Availability_test":"omniverse://localhost/MCE/3dIcons/Availability_Test_1.3.usd", "Azure_Cosmos_DB_API_for_MongoDB_account":"omniverse://localhost/MCE/3dIcons/Azure_Cosmos_DB_API_MongoDB.usd", "Azure_Cosmos_DB_account":"omniverse://localhost/MCE/3dIcons/cube.usda", "Azure_Data_Explorer_Cluster":"omniverse://localhost/MCE/3dIcons/cube.usda", "Azure_DevOps_organization":"omniverse://localhost/MCE/3dIcons/azure-devops.usd", "Azure_Machine_Learning":"omniverse://localhost/MCE/3dIcons/Azure_Machine_Learning.usd", "Azure_Workbook":"omniverse://localhost/MCE/3dIcons/azure-workbook.usd", "Bastion":"omniverse://localhost/MCE/3dIcons/Bastion.usd", "Cognitive_Service":"omniverse://localhost/MCE/3dIcons/Cognitive_Services.usd", "Container_registry":"omniverse://localhost/MCE/3dIcons/container-registries.usd", "Data_Lake_Analytics":"omniverse://localhost/MCE/3dIcons/Data_Lake_Analytics_1.2.usd", "Data_Lake_Storage_Gen1":"omniverse://localhost/MCE/3dIcons/data-lake-storage-gen1.usd", "Data_factory__V2_":"omniverse://localhost/MCE/3dIcons/data-factory.usd", "Disk":"omniverse://localhost/MCE/3dIcons/Disk_1.0.usd", "DNS_Zone":"omniverse://localhost/MCE/3dIcons/cube.usda", "Event_Grid_System_Topic":"omniverse://localhost/MCE/3dIcons/event-grid-topics.usd", "Event_Hubs_Namespace":"omniverse://localhost/MCE/3dIcons/events-hub.usd", "Firewall_Policy":"omniverse://localhost/MCE/3dIcons/Firewall_Policy.usd", "Firewall":"omniverse://localhost/MCE/3dIcons/Firewall.usd", "Function_App":"omniverse://localhost/MCE/3dIcons/function-apps.usd", "Image":"omniverse://localhost/MCE/3dIcons/image.usd", "Key_vault":"omniverse://localhost/MCE/3dIcons/Key_Vaults.usd", "Kubernetes_service":"omniverse://localhost/MCE/3dIcons/kubernetess_services_cubes001_Z.usd", "Language_understanding":"omniverse://localhost/MCE/3dIcons/cube.usda", "Load_balancer":"omniverse://localhost/MCE/3dIcons/load-balancer.usd", "Log_Analytics_query_pack":"omniverse://localhost/MCE/3dIcons/cube.usda", "Log_Analytics_workspace":"omniverse://localhost/MCE/3dIcons/Log_Analytics_Workspace.usd", "Logic_App__Standard_":"omniverse://localhost/MCE/3dIcons/logic-apps.usd", "Logic_app":"omniverse://localhost/MCE/3dIcons/Logic_Apps_Std.usd", "Logic_apps_custom_connector":"omniverse://localhost/MCE/3dIcons/Logic_Apps_Custom_Connector.usd", "Managed_Identity":"omniverse://localhost/MCE/3dIcons/cube.usda", "Network_Interface":"omniverse://localhost/MCE/3dIcons/network-interface.usd", "Network_Watcher":"omniverse://localhost/MCE/3dIcons/cube.usda", 
"Network_security_group":"omniverse://localhost/MCE/3dIcons/network-security-groups.usd", "Power_BI_Embedded":"omniverse://localhost/MCE/3dIcons/Powe_BI_Embedded.usd", "Private_DNS_zone":"omniverse://localhost/MCE/3dIcons/cube.usda", "Private_endpoint":"omniverse://localhost/MCE/3dIcons/cube.usda", "Public_IP_address":"omniverse://localhost/MCE/3dIcons/public-ip-adresses.usd", "Recovery_Services_vault":"omniverse://localhost/MCE/3dIcons/cube.usda", "Restore_Point_Collection":"omniverse://localhost/MCE/3dIcons/Restore_Point_Collection.usd", "Runbook":"omniverse://localhost/MCE/3dIcons/Runbook.usd", "SQL_database":"omniverse://localhost/MCE/3dIcons/SQLServer_6_0.usd", "SQL_elastic_pool":"omniverse://localhost/MCE/3dIcons/SQL_Elastic_Pools.usd", "SQL_server":"omniverse://localhost/MCE/3dIcons/SQLServer.usd", "SQL_virtual_machine":"omniverse://localhost/MCE/3dIcons/sql-virtual-machine.usd", "Search_service":"omniverse://localhost/MCE/3dIcons/cube.usda", "Service_Bus_Namespace":"omniverse://localhost/MCE/3dIcons/service-bus.usd", "Service_Fabric_cluster":"omniverse://localhost/MCE/3dIcons/service-fabric-clusters.usd", "Shared_dashboard":"omniverse://localhost/MCE/3dIcons/cube.usda", "Snapshot":"omniverse://localhost/MCE/3dIcons/Snapshot.usd", "Solution":"omniverse://localhost/MCE/3dIcons/Solution_1.4.usd", "Storage_account":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd", "Traffic_Manager_profile":"omniverse://localhost/MCE/3dIcons/traffic-manager-profiles.usd", "Virtual_machine_scale_set":"omniverse://localhost/MCE/3dIcons/Virtual_Machines_Scale_Sets.usd", "Virtual_machine":"omniverse://localhost/MCE/3dIcons/Virtual_Machine.usd", "Virtual_network":"omniverse://localhost/MCE/3dIcons/Virtual_Network.usd", "Web_App_Bot":"omniverse://localhost/MCE/3dIcons/cube.usda", }
5,596
Python
73.626666
114
0.738206
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/config/extension.toml
[package]
title = "Meta Cloud Explorer (Azure)"
description = "An Omniverse scene authoring tool to help visualize your Azure Infrastructure in your own private Metaverse!"
version = "2022.1.3"
category = "Browsers"
authors = ["USDSync.com, MetaCloudExplorer.com - Gavin Stevens"]
preview_image = "data/resources/azurescaled.png"
icon = "data/resources/meta_cloud_explorer.png"

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"

# URL of the extension source repository.
repository = "https://github.com/USDSync/MetaCloudExplorer"

# Keywords for the extension
keywords = ["Azure", "USD Sync", "Cloud Infrastructure", "Visualization", "Scene composition"]

# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.ui" = {}
"omni.usd" = {}
"omni.kit.menu.utils" = {}
"omni.kit.window.filepicker" = {}
"omni.kit.window.file_importer" = {}

[python.pipapi]
requirements = [
    "pandas",
    "numpy",
    "azure-mgmt-resource",
    "azure-identity",
    "typing-extensions"
]

# Main python module this extension provides, it will be publicly available as "import meta.cloud.explorer.azure".
[[python.module]]
name = "meta.cloud.explorer.azure"
1,247
TOML
29.439024
122
0.718524
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/docs/CHANGELOG.md
# CHANGELOG

## [2022.3.0] - 2022-11-27
### Added
- Update for 2022.3. Requires the Omni.Physx.Commands extension; python library workaround outlined on the wiki.

## [2022.1.3-pre] - 2022-09-08
### Added
- Updated readme; some issues with the release tag pulling main instead.
- Merging 2022.1.3 with main.

## [2022.1.3-pre] - 2022-09-08
### Added
- Pre-release.

## [2022.1.3.A] - 2022-09-06
### Added
- Initial check-in of this version.
- Compatible with Omniverse Code 2022.1.2 & 2022.1.3.
- Requires Omni.Viewport.Utility!
513
Markdown
21.347825
103
0.692008
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/docs/README.md
# USDSync.com
# Meta Cloud Explorer (MCE)
# NVIDIA Omniverse Extension, a Scene Authoring Tool
(In Beta Development phase)

Quickly connect to your Cloud Infrastructure and visualize it in your private Omniverse!*

This extension generates digital models of your Cloud Infrastructure that can be used to gain insights to drive better infrastructure, optimized resources, reduced costs, and breakthrough customer experiences.

Make sure to install the Azure 3D Icon Library!
https://github.com/USDSync/MetaCloudExplorer/wiki/Installing-3D-Icon-library-locally

2022.1.3
This version CANNOT CONNECT** to Azure's live Resource Management API! (live connection supported in MCE 2022.1.1)
This version requires the Omni.Viewport.Utility extension to be installed.
This version works fine with sample data and offline data files.
This version enables resource Object / Info widgets!
This version will become the main branch once 2021.1.3+ supports the azure-identity library.

*Compatible with Omniverse Code 2022.1.2, 2022.1.3+

**This version was created to enable Object / Info widgets, but using 2022.1.2 and 2022.1.3 causes azure-identity to fail due to:
https://forums.developer.nvidia.com/t/pip-library-wont-load-in-2021-1-2/222719/3
1,233
Markdown
46.461537
209
0.801298
mati-nvidia/mc-widget-library/exts/mc.widgets/mc/widgets/extension.py
import omni.ext

from .demo import DemoWindow


# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        print("[mc.widgets] MyExtension startup")
        self._window = DemoWindow("Demo Window", width=300, height=300)
        #settings = carb.settings.get_settings()
        # import fontawesome as fa
        # print(fa.icons['fa-python'])

    def on_shutdown(self):
        print("[mc.widgets] MyExtension shutdown")
        self._window.destroy()
        self._window = None
931
Python
43.38095
119
0.693878
mati-nvidia/mc-widget-library/exts/mc.widgets/mc/widgets/__init__.py
from .extension import *
from ._widgets import CheckBoxGroup, TabGroup
70
Python
34.499983
45
0.814286
mati-nvidia/omni-code-with-me/README.md
# Code With Me

This repository contains NVIDIA Omniverse extensions and projects created during [my live coding sessions on YouTube](https://www.youtube.com/@mati-codes). The goal of these projects and live coding sessions is to teach Omniverse development by example and to show developer workflows for creating Omniverse extensions, connectors and applications.

I have created a tag for every past live coding session to record the state of the project at the end of the session. If you want to go back in time and see the exact code from a livestream, use the tags.

Feel free to use any examples from these projects and reach out to me if you have any questions.

## Contributing

The source code for this repository is provided as-is and we are not accepting outside contributions.
787
Markdown
70.636357
242
0.80305
mati-nvidia/omni-code-with-me/exts/maticodes.tutorial.framework.core/README.md
# Tutorial Framework [maticodes.tutorial.framework.core]

A framework for creating in-app tutorials for NVIDIA Omniverse.
122
Markdown
29.749993
63
0.819672
mati-nvidia/omni-code-with-me/exts/maticodes.tutorial.framework.core/maticodes/tutorial/framework/core/extension.py
# SPDX-License-Identifier: Apache-2.0
import omni.ext
import omni.ui as ui

from .window import TutorialWindow


# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class TutorialFrameworkExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        print("[maticodes.tutorial.framework.core] maticodes tutorial framework core startup")
        self._window = TutorialWindow()

    def on_shutdown(self):
        print("[maticodes.tutorial.framework.core] maticodes tutorial framework core shutdown")
        self._window.destroy()
        self._window = None
949
Python
36.999999
119
0.734457
mati-nvidia/omni-code-with-me/exts/maticodes.tutorial.framework.core/maticodes/tutorial/framework/core/window.py
# SPDX-License-Identifier: Apache-2.0 import omni.ui as ui class TutorialWindow(ui.Window): def __init__(self): super().__init__("Tutorial", width=300, height=300) self.frame.set_build_fn(self._build_window) step1 = Step(50, "Step 1", "This is step 1") step2 = Step(60, "Step 2", "This is step 2") step3 = Step(70, "Step 3", "This is step 3") self.steps = [step1, step2, step3] self.step_index = 0 def _build_window(self): with ui.VStack(): self.step_frame = ui.Frame() self.step_frame.set_build_fn(self._build_step_frame) with ui.HStack(height=20): ui.Button("Reset", width=0) ui.Spacer() ui.Button("Validate", width=0) with ui.HStack(height=0): def prev_func(): self.step_index -= 1 self.step_frame.rebuild() ui.Button("Previous", clicked_fn=prev_func) ui.Spacer() def next_func(): self.step_index += 1 self.step_frame.rebuild() ui.Button("Next", clicked_fn=next_func) def _build_step_frame(self): step = self.steps[self.step_index] step.build() class Step(): def __init__(self, num_lines=10, title="Step", text="Hello World"): self.num_lines = num_lines self.text = text self.title = title def build_title(self): ui.Label(self.title, height=0, alignment=ui.Alignment.CENTER) def build_content(self): with ui.VStack(): for x in range(self.num_lines): ui.Label(self.text) def build(self): with ui.VStack(): self.build_title() with ui.ScrollingFrame(): self.build_content()
1,878
Python
29.803278
71
0.518104
mati-nvidia/omni-code-with-me/exts/maticodes.tutorial.framework.core/docs/CHANGELOG.md
# Changelog

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

## [0.1.0] - 2023-05-09
- Pre-release version. Still WIP.
155
Markdown
16.333332
80
0.670968
mati-nvidia/omni-code-with-me/exts/maticodes.notify.reminder/README.md
# Save Reminder [maticodes.notify.reminder]

A simple extension that reminds you to save your work on a timer.

![](data/notification.png)

## Preferences

There are a few preferences that you can set to customize the functionality. You can access the preferences by clicking on Edit->Preferences.

![](data/settings.png)
320
Markdown
31.099997
141
0.775
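Since those preferences are backed by persistent carb.settings keys (see the extension's constants.py below), they can also be adjusted from a script. A minimal sketch, assuming the extension is enabled:

# Sketch: adjust the Save Reminder preferences from a script instead of the Preferences window.
# The persistent setting paths mirror maticodes/notify/reminder/constants.py.
import carb.settings

settings = carb.settings.get_settings()

# Remind every 5 minutes with a custom message.
settings.set("/persistent/exts/maticodes.notify.reminder/save/interval", 300)
settings.set("/persistent/exts/maticodes.notify.reminder/save/message", "Save your stage!")

# Turn the reminder off entirely.
settings.set("/persistent/exts/maticodes.notify.reminder/save/enabled", False)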
mati-nvidia/omni-code-with-me/exts/maticodes.notify.reminder/maticodes/notify/reminder/preferences.py
# SPDX-License-Identifier: Apache-2.0
from omni.kit.window.preferences import PreferenceBuilder
from omni.kit.widget.settings.settings_widget import SettingType
import omni.ui as ui

from . import constants as c


class SaveReminderPreferences(PreferenceBuilder):
    def __init__(self):
        super().__init__("Reminders")

    def build(self):
        with ui.VStack(height=0, spacing=5):
            with self.add_frame("Save Reminder"):
                with ui.VStack():
                    # TODO: Is "min" a valid kwarg?
                    self.create_setting_widget(
                        "Enable Save Reminder", c.SAVE_ENABLED_SETTING, SettingType.BOOL, min=0
                    )
                    self.create_setting_widget(
                        "Save Reminder Interval (seconds)", c.SAVE_INTERVAL_SETTING, SettingType.INT, min=0
                    )
                    self.create_setting_widget(
                        "Save Reminder Message", c.SAVE_MESSAGE_SETTING, SettingType.STRING, min=0
                    )
1,041
Python
39.076922
107
0.572526
mati-nvidia/omni-code-with-me/exts/maticodes.notify.reminder/maticodes/notify/reminder/constants.py
# SPDX-License-Identifier: Apache-2.0
import carb

SAVE_REMINDER_FIRED = carb.events.type_from_string("maticodes.notify.reminder.SAVE_REMINDER_FIRED")

SAVE_INTERVAL_SETTING = "/persistent/exts/maticodes.notify.reminder/save/interval"
SAVE_ENABLED_SETTING = "/persistent/exts/maticodes.notify.reminder/save/enabled"
SAVE_MESSAGE_SETTING = "/persistent/exts/maticodes.notify.reminder/save/message"

DEFAULT_SAVE_INTERVAL = 600
DEFAULT_SAVE_MESSAGE = "Hey! Don't forget to save!"
478
Python
38.916663
99
0.792887
mati-nvidia/omni-code-with-me/exts/maticodes.notify.reminder/maticodes/notify/reminder/extension.py
# SPDX-License-Identifier: Apache-2.0 import asyncio import carb import carb.settings import omni.ext import omni.kit.app import omni.kit.notification_manager as nm import omni.kit.window.preferences import omni.usd from . import constants as c from .preferences import SaveReminderPreferences class MaticodesNotifyReminderExtension(omni.ext.IExt): # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. def on_startup(self, ext_id): print("[maticodes.notify.reminder] maticodes notify reminder startup") self.settings = carb.settings.get_settings() self.settings.set_default(c.SAVE_ENABLED_SETTING, True) self.settings.set_default(c.SAVE_INTERVAL_SETTING, c.DEFAULT_SAVE_INTERVAL) self.settings.set_default(c.SAVE_MESSAGE_SETTING, c.DEFAULT_SAVE_MESSAGE) self._preferences_page = omni.kit.window.preferences.register_page(SaveReminderPreferences()) self.bus = omni.kit.app.get_app().get_message_bus_event_stream() self.save_fired_sub = self.bus.create_subscription_to_push_by_type(c.SAVE_REMINDER_FIRED, self.on_save_reminder) asyncio.ensure_future(self.reminder_timer()) def on_save_reminder(self, e: carb.events.IEvent): if self.settings.get_as_bool(c.SAVE_ENABLED_SETTING): message = self.settings.get(c.SAVE_MESSAGE_SETTING) ok_button = nm.NotificationButtonInfo("SAVE", on_complete=self.do_save) cancel_button = nm.NotificationButtonInfo("CANCEL", on_complete=None) notification = nm.post_notification( message, hide_after_timeout=False, duration=0, status=nm.NotificationStatus.WARNING, button_infos=[ok_button, cancel_button]) asyncio.ensure_future(self.reminder_timer()) def do_save(self): def save_finished(arg1, arg2, saved_files): if not saved_files: carb.log_error("No files saved! Are you working in an untitled stage?") else: carb.log_info(f"Saved the files: {saved_files}") omni.usd.get_context().save_stage_with_callback(save_finished) async def reminder_timer(self): await asyncio.sleep(self.settings.get(c.SAVE_INTERVAL_SETTING)) if hasattr(self, "bus"): self.bus.push(c.SAVE_REMINDER_FIRED) def on_shutdown(self): print("[maticodes.notify.reminder] maticodes notify reminder shutdown") self.save_fired_sub = None omni.kit.window.preferences.unregister_page(self._preferences_page) self._preferences_page = None
2,676
Python
44.372881
120
0.692078
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/maticodes/soundboard/window/widgets.py
# SPDX-License-Identifier: Apache-2.0 import asyncio import copy from functools import partial import carb import omni.kit.app import omni.kit.uiaudio from omni import ui from .config import ConfigManager, Settings from .constants import ( DEFAULT_BUTTON_COLOR, EDIT_BAR_HEIGHT, GUTTER_WIDTH, REORDER_EVENT, SOUNDS_CHANGED_EVENT, USER_CONFIG_PATH, ) from .style import get_button_style, slot_style class ButtonSlot: def __init__(self, sound_name, sound, width, height) -> None: self._settings = carb.settings.get_settings() self.edit_sub = self._settings.subscribe_to_node_change_events(Settings.EDIT_MODE, self._on_mode_changed) self.color_sub = None self.sound_name = sound_name self.sound = sound self.width = width self.height = height self._audio_iface = omni.kit.uiaudio.get_ui_audio_interface() self.msg_bus = omni.kit.app.get_app().get_message_bus_event_stream() self.color_model = None self.frame = ui.Frame(width=self.width, height=self.height) self.frame.set_build_fn(self._build_frame) self.button_frame = None def _build_frame(self): with ui.HStack(style=slot_style): if self._settings.get(Settings.EDIT_MODE): with ui.ZStack(width=0, height=0): ui.Rectangle(width=GUTTER_WIDTH, height=EDIT_BAR_HEIGHT, name="edit_bar") with ui.VStack(): def drag(sound_name): return sound_name img = ui.Image( carb.tokens.get_tokens_interface().resolve("${glyphs}/toolbar_move_global.svg"), width=GUTTER_WIDTH, height=GUTTER_WIDTH, ) img.set_drag_fn(lambda: drag(self.sound_name)) with ui.HStack(): ui.Spacer() color = ConfigManager.resolved_config()["sounds_repo"][self.sound_name].get( "color", DEFAULT_BUTTON_COLOR ) self.color_model = ui.ColorWidget(*color, width=0, height=0).model self.color_sub = self.color_model.subscribe_end_edit_fn(self._on_color_changed) ui.Spacer() ui.Button( "", width=GUTTER_WIDTH, height=GUTTER_WIDTH, clicked_fn=self._rename_button, image_url=carb.tokens.get_tokens_interface().resolve("${glyphs}/pencil.svg"), ) ui.Button( "", width=GUTTER_WIDTH, height=GUTTER_WIDTH, clicked_fn=self._remove_button, image_url=carb.tokens.get_tokens_interface().resolve("${glyphs}/trash.svg"), ) self.button_frame = ui.Frame(width=self.width, height=self.height) self.button_frame.set_build_fn(self._build_button) def _rename_button(self): RenameWindow(self.sound_name) def _remove_button(self): active_sounds = copy.deepcopy(ConfigManager.resolved_config()["active_sounds"]) active_sounds.remove(self.sound_name) ConfigManager.user_config["active_sounds"] = active_sounds ConfigManager.save_user_config(USER_CONFIG_PATH) self.msg_bus.push(carb.events.type_from_string(REORDER_EVENT)) def _on_color_changed(self, model, item): sound_data = ConfigManager.resolved_config()["sounds_repo"][self.sound_name] color = [] for child in model.get_item_children(): component = model.get_item_value_model(child) color.append(component.as_float) sound_data["color"] = color[:3] ConfigManager.user_config["sounds_repo"][self.sound_name] = sound_data ConfigManager.save_user_config(USER_CONFIG_PATH) self.button_frame.rebuild() def _build_button(self): color = ConfigManager.resolved_config()["sounds_repo"][self.sound_name].get("color", DEFAULT_BUTTON_COLOR) button_style = get_button_style(color) def on_click(): self._audio_iface.play_sound(self.sound) if self._settings.get(Settings.EDIT_MODE): button = ui.Button( self.sound_name, height=self.height, width=self.width, clicked_fn=on_click, style=button_style ) button.set_accept_drop_fn(self._can_drop) button.set_drop_fn(partial(self._on_drop, button, self.sound_name)) else: 
ui.Button(self.sound_name, height=self.height, width=self.width, clicked_fn=on_click, style=button_style) def _can_drop(self, path: str) -> bool: return True def _on_drop(self, button, sound_name, e): if sound_name == e.mime_data: return active_sounds = copy.deepcopy(ConfigManager.resolved_config()["active_sounds"]) moved_id = active_sounds.index(e.mime_data) active_sounds.pop(moved_id) insert_index = active_sounds.index(sound_name) button_width = button.computed_width button_pos_x = button.screen_position_x button_center = button_pos_x + button_width / 2 if e.x > button_center: insert_index += 1 active_sounds.insert(insert_index, e.mime_data) ConfigManager.user_config["active_sounds"] = active_sounds self.msg_bus.push(carb.events.type_from_string(REORDER_EVENT)) def _on_mode_changed(self, value, event_type): self.frame.rebuild() class RenameWindow(ui.Window): title = "Rename Sound" def __init__(self, sound_name, **kwargs) -> None: super().__init__(self.title, flags=ui.WINDOW_FLAGS_MODAL, width=200, height=100, **kwargs) self.sound_name = sound_name self.name_model = None self.msg_bus = omni.kit.app.get_app().get_message_bus_event_stream() self.frame.set_build_fn(self._build_frame) def _build_frame(self): with ui.VStack(): self.name_model = ui.StringField(height=0).model self.name_model.set_value(self.sound_name) ui.Spacer(height=10) with ui.HStack(): ui.Button("Ok", height=0, clicked_fn=self._rename_button) ui.Button("Cancel", height=0, clicked_fn=self._close_window) def _close_window(self): async def close_async(): self.destroy() asyncio.ensure_future(close_async()) def _rename_button(self): sounds_repo = copy.deepcopy(ConfigManager.resolved_config()["sounds_repo"]) new_name = self.name_model.as_string if new_name in sounds_repo: return active_sounds = copy.deepcopy(ConfigManager.resolved_config()["active_sounds"]) index = active_sounds.index(self.sound_name) active_sounds.remove(self.sound_name) active_sounds.insert(index, new_name) data = sounds_repo[self.sound_name] user_sounds_repo = copy.deepcopy(ConfigManager.user_config["sounds_repo"]) if self.sound_name in user_sounds_repo: del user_sounds_repo[self.sound_name] ConfigManager.user_config["active_sounds"] = active_sounds user_sounds_repo[new_name] = data ConfigManager.user_config["sounds_repo"] = user_sounds_repo ConfigManager.save_user_config(USER_CONFIG_PATH) self.msg_bus.push(carb.events.type_from_string(SOUNDS_CHANGED_EVENT)) self._close_window() class PaletteSlot: def __init__(self, sound_name, sound, width, height) -> None: self._settings = carb.settings.get_settings() self.sound_name = sound_name self.sound = sound self.width = width self.height = height self._audio_iface = omni.kit.uiaudio.get_ui_audio_interface() self.msg_bus = omni.kit.app.get_app().get_message_bus_event_stream() self.frame = ui.Frame(width=self.width, height=self.height) self.frame.set_build_fn(self._build_frame) def _build_frame(self): color = ConfigManager.resolved_config()["sounds_repo"][self.sound_name].get("color", DEFAULT_BUTTON_COLOR) style = {"background_color": ui.color(*color), "border_radius": 3} with ui.ZStack(): ui.Rectangle(style=style) with ui.HStack(style=slot_style, width=0): ui.Label(self.sound_name, alignment=ui.Alignment.CENTER) with ui.VStack(): ui.Spacer(height=1) with ui.HStack(): def on_play(): self._audio_iface.play_sound(self.sound) ui.Button( "", width=24, height=24, clicked_fn=on_play, image_url=carb.tokens.get_tokens_interface().resolve("${glyphs}/timeline_play.svg"), ) ui.Button( "", width=24, height=24, 
clicked_fn=self._add_active_sound, image_url=carb.tokens.get_tokens_interface().resolve("${glyphs}/plus.svg"), ) def _add_active_sound(self): active_sounds = copy.deepcopy(ConfigManager.resolved_config()["active_sounds"]) if self.sound_name not in active_sounds: active_sounds.append(self.sound_name) ConfigManager.user_config["active_sounds"] = active_sounds ConfigManager.save_user_config(USER_CONFIG_PATH) self.msg_bus.push(carb.events.type_from_string(SOUNDS_CHANGED_EVENT))
10,065
Python
41.652542
117
0.567213
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/maticodes/soundboard/window/config.py
# SPDX-License-Identifier: Apache-2.0 import copy import json import os from pathlib import Path import carb.settings import carb.tokens import omni.kit.app class ConfigManager: default_config = {} user_config = {} default_config_path = "data/default_config.json" @classmethod def _read_json(cls, filepath): with open(filepath, "r") as f: data = json.load(f) return data @classmethod def load_default_config(cls, ext_id): manager = omni.kit.app.get_app().get_extension_manager() ext_root_path = Path(manager.get_extension_path(ext_id)) filepath = ext_root_path / cls.default_config_path data = cls._read_json(filepath) for sound_name in data["sounds_repo"]: abs_path = Path(ext_root_path) / data["sounds_repo"][sound_name]["uri"] data["sounds_repo"][sound_name]["uri"] = str(abs_path) cls.default_config = data @classmethod def load_user_config(cls, filepath: Path): filepath = cls._resolve_path(filepath) if not os.path.exists(filepath): os.makedirs(os.path.dirname(filepath), exist_ok=True) else: data = cls._read_json(filepath) cls.user_config = data if not cls.user_config: cls.user_config = {"sounds_repo": {}} @classmethod def save_user_config(cls, filepath: Path): filepath = cls._resolve_path(filepath) with open(filepath, "w") as f: json.dump(cls.user_config, f, indent=4) @classmethod def _resolve_path(cls, filepath): return carb.tokens.get_tokens_interface().resolve(str(filepath)) @classmethod def resolved_config(cls): resolved_config = {} resolved_config.update(cls.default_config) resolved_config.update(cls.user_config) sounds_repo = copy.deepcopy(cls.default_config["sounds_repo"]) sounds_repo.update(cls.user_config["sounds_repo"]) resolved_config["sounds_repo"] = sounds_repo return resolved_config class TransientSettings: EDIT_MODE = "/exts/maticodes.soundboard.window/edit_mode" class PersistentSettings: BUTTON_WIDTH = "/exts/maticodes.soundboard.window/button_width" @classmethod def get_persistent_keys(cls): attrs = [attr for attr in dir(cls) if not callable(getattr(cls, attr)) and not attr.startswith("__")] return [getattr(cls, attr) for attr in attrs] class Settings(PersistentSettings, TransientSettings): pass class SettingsManager: PERSISTENT = "/persistent" def __init__(self): self._settings = carb.settings.get_settings() self._load_settings() def _load_settings(self): for key in PersistentSettings.get_persistent_keys(): value = self._settings.get(self.PERSISTENT + key) if value is not None: self._settings.set(key, value) def save_settings(self): for key in PersistentSettings.get_persistent_keys(): value = self._settings.get(key) self._settings.set(self.PERSISTENT + key, value) @property def settings(self): return self._settings
3,203
Python
28.943925
109
0.632532
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/maticodes/soundboard/window/style.py
# SPDX-License-Identifier: Apache-2.0
import omni.ui as ui

slot_style = {
    "ColorWidget": {
        "margin_height": 5
    },
    "Rectangle::edit_bar": {
        "background_color": ui.color(.04),
        "border_radius": 7
    }
}


def get_button_style(color):
    return {
        "": {
            "background_color": ui.color(*[c * 0.5 for c in color]),
            "background_gradient_color": ui.color(*color)
        },
        ":hovered": {
            "background_color": ui.color(*[c * 0.75 for c in color]),
            "background_gradient_color": ui.color(*[c * 1.1 for c in color]),
        },
    }
619
Python
23.799999
77
0.513732
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/maticodes/soundboard/window/constants.py
# SPDX-License-Identifier: Apache-2.0
from pathlib import Path

import carb

REORDER_EVENT = "maticodes.soundboard.window.BUTTONS_REORDERED"
SOUNDS_CHANGED_EVENT = "maticodes.soundboard.window.SOUNDS_CHANGED"

DATA_DIR = Path("${omni_data}/exts/maticodes.soundboard.window")
USER_CONFIG_PATH = DATA_DIR / "user.config"

GUTTER_WIDTH = 24
EDIT_BAR_HEIGHT = 110
DEFAULT_BUTTON_COLOR = (0.15, 0.15, 0.15)
403
Python
24.249998
67
0.754342
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/maticodes/soundboard/window/extension.py
# SPDX-License-Identifier: Apache-2.0 from .config import SettingsManager import omni.ext import omni.kit.app from .window import Soundboard # Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be # instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled # on_shutdown() is called. class SoundboardWindowExtension(omni.ext.IExt): # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. MENU_PATH = "Window/Soundboard" def on_startup(self, ext_id): self.ext_id = ext_id self.settings_mgr = SettingsManager() editor_menu = omni.kit.ui.get_editor_menu() self._window = None self._menu = editor_menu.add_item(SoundboardWindowExtension.MENU_PATH, self._on_menu_click, toggle=True, value=True) self.show_window(True) def _on_menu_click(self, menu, toggled): self.show_window(toggled) def show_window(self, toggled): if toggled: if self._window is None: self._window = Soundboard("Soundboard", self.ext_id, width=800, height=500) self._window.set_visibility_changed_fn(self._visibility_changed_fn) else: self._window.show() else: if self._window is not None: self._window.hide() def _visibility_changed_fn(self, visible: bool): """Toggles window visibility and Window menu item checked state. Args: visible (bool): Whether the window is visible or not """ if self._menu: omni.kit.ui.get_editor_menu().set_value(SoundboardWindowExtension.MENU_PATH, visible) self.show_window(visible) def on_shutdown(self): if self._menu: omni.kit.ui.get_editor_menu().remove_item(self._menu) self.settings_mgr.save_settings() if self._window is not None: self._window.destroy() self._window = None
2,157
Python
35.576271
119
0.639777
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/maticodes/soundboard/window/window.py
# SPDX-License-Identifier: Apache-2.0 import copy import shutil from functools import partial from pathlib import Path from typing import List import carb.tokens import omni.kit.app import omni.kit.uiaudio import omni.ui as ui from omni.kit.window.drop_support import ExternalDragDrop from .config import ConfigManager, Settings from .constants import DATA_DIR, REORDER_EVENT, SOUNDS_CHANGED_EVENT, USER_CONFIG_PATH, GUTTER_WIDTH from .widgets import ButtonSlot, PaletteSlot class Soundboard(ui.Window): def __init__(self, title, ext_id, **kwargs): super().__init__(title, **kwargs) self.ext_id = ext_id self._sounds = {} self._buttons_frame = None self._slider_sub = None ConfigManager.load_default_config(self.ext_id) ConfigManager.load_user_config(USER_CONFIG_PATH) self._audio_iface = omni.kit.uiaudio.get_ui_audio_interface() self._load_sounds() self._settings = carb.settings.get_settings() self._settings_sub = self._settings.subscribe_to_node_change_events( Settings.BUTTON_WIDTH, self._on_settings_changed ) self._edit_sub = self._settings.subscribe_to_node_change_events(Settings.EDIT_MODE, self._on_edit_changed) self._external_drag_and_drop = None bus = omni.kit.app.get_app().get_message_bus_event_stream() reorder_event_type = carb.events.type_from_string(REORDER_EVENT) self._reorder_sub = bus.create_subscription_to_push_by_type(reorder_event_type, self._on_reorder) self._sounds_changed_sub = bus.create_subscription_to_push_by_type( carb.events.type_from_string(SOUNDS_CHANGED_EVENT), self._on_sounds_changed ) self.frame.set_build_fn(self._build_window) def _on_reorder(self, e): self._buttons_frame.rebuild() def _on_sounds_changed(self, e): self._load_sounds() self._buttons_frame.rebuild() def _on_edit_changed(self, item, event_type): self.frame.rebuild() def _build_window(self): edit_mode = self._settings.get(Settings.EDIT_MODE) with ui.VStack(): with ui.HStack(height=0): if edit_mode: ui.Label("Button Width: ", width=0) def slider_changed(model): self._settings.set(Settings.BUTTON_WIDTH, model.as_float) model = ui.SimpleFloatModel(self._settings.get(Settings.BUTTON_WIDTH)) self._slider_sub = model.subscribe_value_changed_fn(slider_changed) ui.FloatSlider(model, min=50, max=400, step=1) ui.Spacer() def set_edit_mode(button): button.checked = not button.checked if not button.checked: ConfigManager.save_user_config(USER_CONFIG_PATH) self._settings.set(Settings.EDIT_MODE, button.checked) button = ui.Button(text="Edit", height=0, width=0, checked=edit_mode) button.set_clicked_fn(partial(set_edit_mode, button)) self._buttons_frame = ui.Frame() self._buttons_frame.set_build_fn(self._build_buttons_frame) def _build_buttons_frame(self): button_width = self._settings.get(Settings.BUTTON_WIDTH) edit_mode = self._settings.get(Settings.EDIT_MODE) gutter_offset = GUTTER_WIDTH if edit_mode else 0 with ui.VStack(): with ui.ScrollingFrame( horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF, vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, ): with ui.VGrid(column_width=button_width + gutter_offset): for sound_name in ConfigManager.resolved_config()["active_sounds"]: ButtonSlot(sound_name, self._sounds[sound_name], width=button_width, height=button_width) if edit_mode: with ui.ScrollingFrame( horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF, height=75, ): with ui.ZStack(): ui.Rectangle(style={"background_color": ui.color.black}) with ui.VStack(): # ui.Spacer(height=5) with 
ui.HStack(style={"margin": 2}): for sound_name, sound_data in ConfigManager.resolved_config()[ "sounds_repo" ].items(): sound = self._load_sound(sound_data["uri"]) PaletteSlot(sound_name, sound, width=button_width, height=50) if self._external_drag_and_drop: self._external_drag_and_drop.destroy() self._external_drag_and_drop = None self._external_drag_and_drop = ExternalDragDrop( window_name=self.title, drag_drop_fn=self._on_ext_drag_drop ) elif self._external_drag_and_drop: self._external_drag_and_drop.destroy() self._external_drag_and_drop = None def _load_sounds(self): self._sounds = {} for sound_name in ConfigManager.resolved_config()["active_sounds"]: sound_data = ConfigManager.resolved_config()["sounds_repo"][sound_name] sound = self._load_sound(sound_data["uri"]) self._sounds[sound_name] = sound def _load_sound(self, filepath): return self._audio_iface.create_sound(filepath) def _on_settings_changed(self, item, event_type): if self._buttons_frame: self._buttons_frame.rebuild() def _on_ext_drag_drop(self, edd: ExternalDragDrop, payload: List[str]): paths = edd.expand_payload(payload) if paths: for p in paths: filepath = Path(p) if filepath.suffix in [".mp3", ".wav"]: dest = DATA_DIR / filepath.name dest = carb.tokens.get_tokens_interface().resolve(str(dest)) shutil.copy(filepath, dest) self._sounds[filepath.stem] = self._load_sound(dest) self._add_sound_to_config(filepath.stem, dest) ConfigManager.save_user_config(USER_CONFIG_PATH) self.frame.rebuild() def _add_sound_to_config(self, sound_name, file_path): active_sounds = copy.deepcopy(ConfigManager.resolved_config()["active_sounds"]) active_sounds.append(sound_name) ConfigManager.user_config["active_sounds"] = active_sounds if not ConfigManager.user_config.get("sounds_repo"): ConfigManager.user_config["sounds_repo"] = {} ConfigManager.user_config["sounds_repo"][sound_name] = {"uri": file_path} def show(self): self.visible = True def hide(self): self.visible = False def destroy(self) -> None: self._slider_sub = None if self._settings_sub: self._settings.unsubscribe_to_change_events(self._settings_sub) self._settings_sub = None if self._edit_sub: self._settings.unsubscribe_to_change_events(self._edit_sub) self._edit_sub = None if self._reorder_sub: self._reorder_sub.unsubscribe() self._reorder_sub = None if self._sounds_changed_sub: self._sounds_changed_sub.unsubscribe() self._sounds_changed_sub = None if self._external_drag_and_drop: self._external_drag_and_drop.destroy() self._external_drag_and_drop = None if self._buttons_frame: self._buttons_frame.destroy() self._buttons_frame = None super().destroy()
8,034
Python
41.739361
114
0.582898
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/maticodes/soundboard/window/scripts/create_default_config.py
# SPDX-License-Identifier: Apache-2.0 import json from pathlib import Path config_path = Path(__file__).parent.parent.parent.parent / "data" / "default_config.json" sounds = { "Applause": { "uri": "data/sounds/applause.wav" }, "Door Bell": { "uri": "data/sounds/door_bell.wav" }, "Door Bell": { "uri": "data/sounds/door_bell.wav" }, "Crowd Laughing": { "uri": "data/sounds/laugh_crowd.mp3" }, "Crowd Sarcastic Laughing": { "uri": "data/sounds/laugh_crowd_sarcastic.wav" }, "Level Complete": { "uri": "data/sounds/level_complete.wav" }, "Ooooh Yeah": { "uri": "data/sounds/oh_yeah.wav" }, "Pew Pew": { "uri": "data/sounds/pew_pew.wav" }, "Phone Ringing 1": { "uri": "data/sounds/phone_ring_analog.wav" }, "Phone Ringing 2": { "uri": "data/sounds/phone_ringing_digital.wav" }, "Rooster Crowing": { "uri": "data/sounds/rooster_crowing.wav" }, "Thank you": { "uri": "data/sounds/thank_you.wav" }, "Timer": { "uri": "data/sounds/timer.wav" }, "Woohoo": { "uri": "data/sounds/woohoo.wav" }, "Yes Scream": { "uri": "data/sounds/yes_scream.wav" }, } config = { "active_sounds": [key for key in sounds], "sounds_repo": sounds } with open(config_path, "w") as f: json.dump(config, f, indent=4)
1,441
Python
21.888889
89
0.536433
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"

# The title and description fields are primarily for displaying extension info in UI
title = "Soundboard Window"
description="A sound effects soundboard. Just for fun and for learning."

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"

# URL of the extension source repository.
repository = "https://github.com/mati-nvidia/omni-code-with-me"

# One of categories for UI.
category = "Audio"

# Keywords for the extension
keywords = ["kit", "sound", "example", "audio"]

# Icon to show in the extension manager
icon = "data/icon.png"

# Preview to show in the extension manager
preview_image = "data/soundboard_preview.png"

# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.uiaudio" = {}
"omni.kit.window.drop_support" = {}

# Main python module this extension provides, it will be publicly available as "import maticodes.soundboard.window".
[[python.module]]
name = "maticodes.soundboard.window"

[settings]
exts."maticodes.soundboard.window".button_width = 300
exts."maticodes.soundboard.window".edit_mode = false

[[test]]
# Extra dependencies only to be used during test run
dependencies = [
    "omni.kit.ui_test" # UI testing extension
]
1,301
TOML
27.304347
116
0.737125
mati-nvidia/omni-code-with-me/exts/maticodes.soundboard.window/docs/README.md
# Soundboard Window

This is a simple sound effects soundboard extension. Have some fun with it and use it for learning Kit. Some of the concepts that you can learn from the Code With Me recordings and this repository:
- Drag and Drop
- External Drag and Drop
- Modal Window
- Using persistent and transient carb.settings
- Using carb.tokens
- Custom events

## Performance Mode

![](../data/soundboard_preview.png)

This mode is just a window with a grid of buttons to have a simple UX for playing sounds. Click on a button to play its sound. You can click on the "Edit" button to switch to "Edit" mode.

## Edit Mode

![](../data/edit_mode.png)

In this mode, you can:
- Resize the buttons
- Drag and drop from the move handle to reorder the buttons.
- Assign custom colors to the buttons
- Rename buttons
- Remove buttons
- Preview sounds from the Sounds Palette at the bottom of the window
- Add buttons from the Sounds Palette
- Add a new sound to the active sounds and Sounds Palette by dragging a sound file into the window.

## Included Sound Files

The included sound files in this extension are CC0 from [freesound.org](https://freesound.org).
1,151
Markdown
38.724137
187
0.762815
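One of the concepts listed in that README, custom events, reduces to a small message-bus pattern. A minimal sketch using the event name from the extension's constants.py; the subscribe and push calls mirror what widgets.py and window.py above already do:

# Sketch: the custom-event pattern used by the Soundboard (event name from constants.py).
import carb.events
import omni.kit.app

REORDER_EVENT = carb.events.type_from_string("maticodes.soundboard.window.BUTTONS_REORDERED")

bus = omni.kit.app.get_app().get_message_bus_event_stream()

def on_reorder(event: carb.events.IEvent):
    # React to the event, e.g. rebuild the button grid.
    print("Buttons were reordered")

# Keep the subscription object alive; dropping it unsubscribes.
reorder_sub = bus.create_subscription_to_push_by_type(REORDER_EVENT, on_reorder)

# Any widget can broadcast the event without knowing who listens.
bus.push(REORDER_EVENT)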
mati-nvidia/omni-code-with-me/exts/maticodes.layers.mute/maticodes/layers/mute/extension.py
import carb
import omni.ext

from .window import LayerMuteWindow


class LayersVisibilityExtension(omni.ext.IExt):
    def on_startup(self, ext_id):
        self._window = LayerMuteWindow("Layers Mute Window", width=300, height=300)

    def on_shutdown(self):
        self._window.destroy()
292
Python
21.53846
83
0.712329
mati-nvidia/omni-code-with-me/exts/maticodes.layers.mute/maticodes/layers/mute/window.py
from functools import partial import carb import omni.kit.commands import omni.kit.usd.layers as usd_layers import omni.ui as ui import omni.usd class LayerMuteWindow(ui.Window): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Using ui.Frame.set_build_fn() provides a function to be called with ui.Frame.rebuild() self.frame.set_build_fn(self.build_frame) # Using the same interfaces as Layers window self.layers: usd_layers.Layers = usd_layers.get_layers() self.layers_state: usd_layers.LayersState = self.layers.get_layers_state() self._event_subs = [] events_stream = omni.usd.get_context().get_stage_event_stream() self._event_subs.append( events_stream.create_subscription_to_pop_by_type( omni.usd.StageEventType.OPENED, self._on_stage_opened ) ) self._event_subs.append( self.layers.get_event_stream().create_subscription_to_pop(self._on_layers_changed) ) def build_frame(self): with ui.ScrollingFrame(): with ui.VStack(): layer_ids = self.layers_state.get_local_layer_identifiers() if len(layer_ids) < 2: ui.Label("There are currently no sublayers in this Stage.", alignment=ui.Alignment.CENTER) for layer_id in layer_ids: layer_name = self.layers_state.get_layer_name(layer_id) # Skip the root layer since it can't be muted. if layer_name != "Root Layer": is_muted = self.layers_state.is_layer_locally_muted(layer_id) button = ui.Button(layer_name, height=25, checked=not is_muted) button.set_clicked_fn(partial(self._on_clicked, layer_id, button)) def _on_clicked(self, layer_id, button): button.checked = not button.checked # Using the Kit command allows users to undo the change with Ctrl+Z omni.kit.commands.execute("SetLayerMuteness", layer_identifier=layer_id, muted=not button.checked) def _on_stage_opened(self, event: carb.events.IEvent): # If user changes stages, rebuild the window with a new list of layers. self.frame.rebuild() def _on_layers_changed(self, event: carb.events.IEvent): # If layers are added, removed or layer muteness changed. self.frame.rebuild() def destroy(self) -> None: for sub in self._event_subs: sub.unsubscribe() return super().destroy()
2,596
Python
40.887096
110
0.616333
mati-nvidia/omni-code-with-me/exts/maticodes.layers.mute/docs/README.md
# Layers Mute Window [maticodes.layers.mute]

A simple extension showing how to create a UI to replicate the mute functionality of the Layers Window.
150
Markdown
36.749991
103
0.8
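The heart of that UI is a single undoable Kit command per sublayer. A minimal sketch using the same calls as the extension's window.py above:

# Sketch: mute every sublayer of the current stage with the same undoable command
# (SetLayerMuteness) that the extension's window.py uses.
import omni.kit.commands
import omni.kit.usd.layers as usd_layers

layers_state = usd_layers.get_layers().get_layers_state()

for layer_id in layers_state.get_local_layer_identifiers():
    # The root layer cannot be muted, so skip it just like the extension does.
    if layers_state.get_layer_name(layer_id) == "Root Layer":
        continue
    omni.kit.commands.execute("SetLayerMuteness", layer_identifier=layer_id, muted=True)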
mati-nvidia/omni-code-with-me/scripts/create_blenshapes.py
# SPDX-License-Identifier: Apache-2.0 import math from pxr import Usd, UsdGeom, UsdSkel, Gf, Vt stage = Usd.Stage.CreateNew("blendshapes.usda") xform: UsdGeom.Xform = UsdGeom.Xform.Define(stage, "/World") stage.SetDefaultPrim(xform.GetPrim()) stage.SetStartTimeCode(1.0) stage.SetEndTimeCode(17.0) UsdGeom.SetStageMetersPerUnit(stage, 0.01) UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) eq_tri_height = math.sqrt(3) * 1/2 skel_root = UsdSkel.Root.Define(stage, "/World/MorphingTri") extents_anim = { 1: [(-0.5, 0, 0), (0.5, eq_tri_height, 0)], 5: [(-0.5, 0, 0), (0.5, 2* eq_tri_height, 0)], 9: [(-0.5, 0, 0), (0.5, eq_tri_height, 0)], 13: [(-0.5, 0, 0), (0.5, eq_tri_height + (1-eq_tri_height), 0)], 17: [(-0.5, 0, 0), (0.5, 2*eq_tri_height + (1-eq_tri_height), 0)], } extents_attr = skel_root.CreateExtentAttr() for timesample, value in extents_anim.items(): extents_attr.Set(value, timesample) # Skeleton is required even if it's empty skel = UsdSkel.Skeleton.Define(stage, skel_root.GetPath().AppendChild("Skel")) mesh = UsdGeom.Mesh.Define(stage, "/World/MorphingTri/Mesh") mesh_binding = UsdSkel.BindingAPI.Apply(mesh.GetPrim()) # This binding could go on SkelRoot too. It will inherit down to the mesh. mesh_binding.CreateSkeletonRel().SetTargets([skel.GetPath()]) points = Vt.Vec3fArray([ (0.5, 0, 0), (0, eq_tri_height, 0), (-0.5, 0, 0) ]) face_vert_indices = [ 0, 1, 2 ] face_vert_counts = [3] mesh.CreatePointsAttr(points) mesh.CreateFaceVertexIndicesAttr(face_vert_indices) mesh.CreateFaceVertexCountsAttr(face_vert_counts) iso = UsdSkel.BlendShape.Define(stage, mesh.GetPath().AppendChild("iso")) iso.CreateOffsetsAttr().Set([(0, eq_tri_height, 0)]) iso.CreatePointIndicesAttr().Set([1]) right = UsdSkel.BlendShape.Define(stage, mesh.GetPath().AppendChild("right")) right.CreateOffsetsAttr().Set([(-0.5, 1-eq_tri_height, 0)]) right.CreatePointIndicesAttr().Set([1]) mesh_binding = UsdSkel.BindingAPI.Apply(mesh.GetPrim()) mesh_binding.CreateBlendShapesAttr().Set(["iso", "right"]) mesh_binding.CreateBlendShapeTargetsRel().SetTargets([iso.GetPath(), right.GetPath()]) # anim = UsdSkel.Animation.Define(stage, skel_root.GetPath().AppendChild("Anim")) # anim.CreateBlendShapesAttr().Set(["right", "iso"]) # anim.CreateBlendShapeWeightsAttr().Set([1.0, 2.0]) anim = UsdSkel.Animation.Define(stage, skel_root.GetPath().AppendChild("Anim")) anim.CreateBlendShapesAttr().Set(["right", "iso"]) # Frame 1 anim.CreateBlendShapeWeightsAttr().Set([0.0, 0.0], 1.0) # Frame 5 anim.CreateBlendShapeWeightsAttr().Set([0.0, 1.0], 5.0) # Frame 9 anim.CreateBlendShapeWeightsAttr().Set([0.0, 0.0], 9.0) # Frame 13 anim.CreateBlendShapeWeightsAttr().Set([1.0, 0.0], 13.0) # Frame 17 anim.CreateBlendShapeWeightsAttr().Set([1.0, 1.0], 17.0) root_binding = UsdSkel.BindingAPI.Apply(skel_root.GetPrim()) root_binding.CreateAnimationSourceRel().AddTarget(anim.GetPath()) stage.Save()
2,929
Python
32.295454
86
0.707067
mati-nvidia/omni-code-with-me/scripts/basic_skel.py
# SPDX-License-Identifier: Apache-2.0 from pxr import Usd, UsdGeom, UsdSkel, Gf, Vt stage = Usd.Stage.CreateNew("test_arm.usda") xform: UsdGeom.Xform = UsdGeom.Xform.Define(stage, "/World") stage.SetDefaultPrim(xform.GetPrim()) stage.SetStartTimeCode(1.0) stage.SetEndTimeCode(10.0) UsdGeom.SetStageMetersPerUnit(stage, 0.01) UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) skel_root = UsdSkel.Root.Define(stage, "/World/Arm") # Create Mesh mesh = UsdGeom.Mesh.Define(stage, "/World/Arm/Mesh") points = Vt.Vec3fArray([ # Hand (0.5, -0.5, 4), (-0.5, -0.5, 4), (0.5, 0.5, 4), (-0.5, 0.5, 4), # Shoulder (-0.5, -0.5, 0), (0.5, -0.5, 0), (-0.5, 0.5, 0), (0.5, 0.5, 0), # Elbow (-0.5, 0.5, 2), (0.5, 0.5, 2), (0.5, -0.5, 2), (-0.5, -0.5, 2) ]) face_vert_indices = [ 2, 3, 1, 0, 6, 7, 5, 4, 8, 9, 7, 6, 3, 2, 9, 8, 10, 11, 4, 5, 0, 1, 11, 10, 7, 9, 10, 5, 9, 2, 0, 10, 3, 8, 11, 1, 8, 6, 4, 11 ] face_vert_counts = [4] * 10 mesh.CreatePointsAttr(points) mesh.CreateFaceVertexIndicesAttr(face_vert_indices) mesh.CreateFaceVertexCountsAttr(face_vert_counts) skeleton = UsdSkel.Skeleton.Define(stage, "/World/Arm/Rig") joints = ["Shoulder", "Shoulder/Elbow", "Shoulder/Elbow/Hand"] skeleton.CreateJointsAttr(joints) # World space xforms. Who cares about my parents? bind_xforms = Vt.Matrix4dArray([ Gf.Matrix4d((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1)), Gf.Matrix4d((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,2,1)), Gf.Matrix4d((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,4,1)) ]) skeleton.CreateBindTransformsAttr(bind_xforms) # Local space xforms. What's my offset from my parent joint? rest_xforms = Vt.Matrix4dArray([ Gf.Matrix4d((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1)), Gf.Matrix4d((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,2,1)), Gf.Matrix4d((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,2,1)) ]) skeleton.CreateRestTransformsAttr(rest_xforms) # TODO: Do I need to apply BindingAPI to skel root? binding_root = UsdSkel.BindingAPI.Apply(skel_root.GetPrim()) binding_mesh = UsdSkel.BindingAPI.Apply(mesh.GetPrim()) binding_skel = UsdSkel.BindingAPI.Apply(skeleton.GetPrim()) binding_mesh.CreateSkeletonRel().SetTargets([skeleton.GetPath()]) # This is index in joints property for each vertex. joint_indices = binding_mesh.CreateJointIndicesPrimvar(False, 1) joint_indices.Set([2,2,2,2, 0,0,0,0, 1,1,1,1]) joint_weights = binding_mesh.CreateJointWeightsPrimvar(False, 1) joint_weights.Set([1,1,1,1, 1,1,1,1, 1,1,1,1]) identity = Gf.Matrix4d().SetIdentity() binding_mesh.CreateGeomBindTransformAttr(identity) skel_anim = UsdSkel.Animation.Define(stage, skeleton.GetPath().AppendPath("Anim")) binding_skel.CreateAnimationSourceRel().SetTargets([skel_anim.GetPath()]) skel_anim.CreateJointsAttr().Set(["Shoulder/Elbow"]) skel_anim.CreateTranslationsAttr().Set(Vt.Vec3fArray([(0.0, 0.0, 2.0)])) skel_anim.CreateScalesAttr().Set(Vt.Vec3hArray([(1.0, 1.0, 1.0)])) rot_attr = skel_anim.CreateRotationsAttr() rot_anim = { 1: [(1.0,0.0,0.0,0.0)], 10: [(0.7071, 0.7071, 0.0, 0.0)] } for key in rot_anim: values = rot_anim[key] quats = [] for value in values: quats.append(Gf.Quatf(*value)) rot_attr.Set(Vt.QuatfArray(quats), key) stage.Save()
3,222
Python
33.655914
82
0.655183
mati-nvidia/omni-code-with-me/scripts/create_prvw_surf.py
# SPDX-License-Identifier: Apache-2.0 import omni.usd from pxr import UsdShade, UsdGeom, Sdf stage = omni.usd.get_context().get_stage() UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) billboard = UsdGeom.Mesh.Define(stage, "/World/billboard") billboard.CreatePointsAttr([(-100, -100, 0), (100, -100, 0), (100, 100, 0), (-100, 100,0)]) billboard.CreateFaceVertexCountsAttr([4]) billboard.CreateFaceVertexIndicesAttr([0, 1, 2, 3]) billboard.CreateExtentAttr([(-100, -100, 0), (100, 100, 0)]) tex_coords = UsdGeom.PrimvarsAPI(billboard).CreatePrimvar("st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.varying) tex_coords.Set([(0, 0), (1, 0), (1, 1), (0, 1)]) material = UsdShade.Material.Define(stage, "/World/PreviewMtl") pbr_shader = UsdShade.Shader.Define(stage, "/World/PreviewMtl/PBRShader") pbr_shader.CreateIdAttr("UsdPreviewSurface") pbr_shader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(0.4) pbr_shader.CreateInput("occlusion", Sdf.ValueTypeNames.Float) pbr_shader.CreateInput("displacement", Sdf.ValueTypeNames.Float) pbr_shader.CreateInput("normal", Sdf.ValueTypeNames.Normal3f) pbr_shader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(0.0) pbr_shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Color3f).Set((1.0, 0.0, 0.0)) material.CreateSurfaceOutput().ConnectToSource(pbr_shader.ConnectableAPI(), "surface") material.CreateDisplacementOutput().ConnectToSource(pbr_shader.ConnectableAPI(), "displacement") st_reader = UsdShade.Shader.Define(stage, '/World/PreviewMtl/st_reader') st_reader.CreateIdAttr('UsdPrimvarReader_float2') transform2d = UsdShade.Shader.Define(stage, '/World/PreviewMtl/transform2d') transform2d.CreateIdAttr("UsdTransform2d") transform2d.CreateInput("in", Sdf.ValueTypeNames.Float2).ConnectToSource(st_reader.ConnectableAPI(), 'result') transform2d.CreateInput("rotation", Sdf.ValueTypeNames.Float) transform2d.CreateInput("scale", Sdf.ValueTypeNames.Float2).Set((1.0, 1.0)) transform2d.CreateInput("translation", Sdf.ValueTypeNames.Float2) # Diffuse diffuseTextureSampler = UsdShade.Shader.Define(stage,'/World/PreviewMtl/diffuseTexture') diffuseTextureSampler.CreateIdAttr('UsdUVTexture') diffuseTextureSampler.CreateInput('file', Sdf.ValueTypeNames.Asset).Set("C:/Users/mcodesal/Downloads/Ground062S_1K-PNG/Ground062S_1K_Color.png") diffuseTextureSampler.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource(transform2d.ConnectableAPI(), 'result') diffuseTextureSampler.CreateInput("wrapS", Sdf.ValueTypeNames.Token).Set("repeat") diffuseTextureSampler.CreateInput("wrapT", Sdf.ValueTypeNames.Token).Set("repeat") diffuseTextureSampler.CreateOutput('rgb', Sdf.ValueTypeNames.Float3) pbr_shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Color3f).ConnectToSource(diffuseTextureSampler.ConnectableAPI(), 'rgb') # Roughness roughTextureSampler = UsdShade.Shader.Define(stage,'/World/PreviewMtl/roughnessTexture') roughTextureSampler.CreateIdAttr('UsdUVTexture') roughTextureSampler.CreateInput('file', Sdf.ValueTypeNames.Asset).Set("C:/Users/mcodesal/Downloads/Ground062S_1K-PNG/Ground062S_1K_Roughness.png") roughTextureSampler.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource(transform2d.ConnectableAPI(), 'result') roughTextureSampler.CreateInput("wrapS", Sdf.ValueTypeNames.Token).Set("repeat") roughTextureSampler.CreateInput("wrapT", Sdf.ValueTypeNames.Token).Set("repeat") roughTextureSampler.CreateOutput('r', Sdf.ValueTypeNames.Float) pbr_shader.CreateInput("roughness", Sdf.ValueTypeNames.Color3f).ConnectToSource(roughTextureSampler.ConnectableAPI(), 
'r') # AO ao = UsdShade.Shader.Define(stage,'/World/PreviewMtl/aoTexture') ao.CreateIdAttr('UsdUVTexture') ao.CreateInput('file', Sdf.ValueTypeNames.Asset).Set("C:/Users/mcodesal/Downloads/Ground062S_1K-PNG/Ground062S_1K_AmbientOcclusion.png") ao.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource(transform2d.ConnectableAPI(), 'result') ao.CreateInput("wrapS", Sdf.ValueTypeNames.Token).Set("repeat") ao.CreateInput("wrapT", Sdf.ValueTypeNames.Token).Set("repeat") ao.CreateOutput('r', Sdf.ValueTypeNames.Float) pbr_shader.CreateInput("occlusion", Sdf.ValueTypeNames.Color3f).ConnectToSource(ao.ConnectableAPI(), 'r') # Displacement displace = UsdShade.Shader.Define(stage,'/World/PreviewMtl/displaceTexture') displace.CreateIdAttr('UsdUVTexture') displace.CreateInput('file', Sdf.ValueTypeNames.Asset).Set("C:/Users/mcodesal/Downloads/Ground062S_1K-PNG/Ground062S_1K_Displacement.png") displace.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource(transform2d.ConnectableAPI(), 'result') displace.CreateInput("wrapS", Sdf.ValueTypeNames.Token).Set("repeat") displace.CreateInput("wrapT", Sdf.ValueTypeNames.Token).Set("repeat") displace.CreateOutput('r', Sdf.ValueTypeNames.Float) pbr_shader.CreateInput("displacement", Sdf.ValueTypeNames.Color3f).ConnectToSource(displace.ConnectableAPI(), 'r') # Normal normal = UsdShade.Shader.Define(stage,'/World/PreviewMtl/normalTexture') normal.CreateIdAttr('UsdUVTexture') normal.CreateInput('file', Sdf.ValueTypeNames.Asset).Set("C:/Users/mcodesal/Downloads/Ground062S_1K-PNG/Ground062S_1K_NormalGL.png") normal.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource(transform2d.ConnectableAPI(), 'result') normal.CreateInput("wrapS", Sdf.ValueTypeNames.Token).Set("repeat") normal.CreateInput("wrapT", Sdf.ValueTypeNames.Token).Set("repeat") normal.CreateInput("bias", Sdf.ValueTypeNames.Float4).Set((-1, -1, -1, 0)) normal.CreateInput("scale", Sdf.ValueTypeNames.Float4).Set((2.0, 2.0, 2.0, 1.0)) normal.CreateInput("sourceColorSpace", Sdf.ValueTypeNames.Token).Set("raw") normal.CreateOutput('rgb', Sdf.ValueTypeNames.Float3) pbr_shader.CreateInput("normal", Sdf.ValueTypeNames.Color3f).ConnectToSource(normal.ConnectableAPI(), 'rgb') st_input = material.CreateInput('frame:stPrimvarName', Sdf.ValueTypeNames.Token) st_input.Set('st') st_reader.CreateInput('varname',Sdf.ValueTypeNames.Token).ConnectToSource(st_input) billboard.GetPrim().ApplyAPI(UsdShade.MaterialBindingAPI) UsdShade.MaterialBindingAPI(billboard).Bind(material)
6,068
Python
60.30303
146
0.798451
mati-nvidia/omni-code-with-me/scripts/mesh_example.py
# SPDX-License-Identifier: Apache-2.0 from pxr import Usd, UsdGeom, UsdSkel, Gf, Vt stage = Usd.Stage.CreateNew("test_mesh.usda") xform: UsdGeom.Xform = UsdGeom.Xform.Define(stage, "/World") stage.SetDefaultPrim(xform.GetPrim()) # Create Mesh mesh = UsdGeom.Mesh.Define(stage, "/World/Mesh") points = Vt.Vec3fArray([ # Top, Right, Bottom, Left (0.0, 1.0, 0.0), (1.0, 0.0, 0.0), (0.0, -1.0, 0.0), (-1.0, 0.0, 0.0), (0.0, 1.0, 1.0), (1.0, 0.0, 1.0), (0.0, -1.0, 1.0), (-1.0, 0.0, 1.0), ]) face_vert_indices = [ 0, 1, 2, 3, 0, 4, 5, 1, 1, 5, 6, 2, 6, 7, 3, 2, 7, 4, 0, 3, 7, 6, 5, 4 ] face_vert_counts = [4] * int(len(face_vert_indices) / 4.0) #[4] * 2 mesh.CreatePointsAttr(points) mesh.CreateFaceVertexIndicesAttr(face_vert_indices) mesh.CreateFaceVertexCountsAttr(face_vert_counts) #mesh.CreateSubdivisionSchemeAttr("none") stage.Save()
880
Python
24.911764
73
0.614773
mati-nvidia/omni-code-with-me/scripts/variant_set_basics.py
# SPDX-License-Identifier: Apache-2.0 import omni.usd from pxr import UsdGeom stage = omni.usd.get_context().get_stage() xform: UsdGeom.Xform = UsdGeom.Xform.Define(stage, "/World/MyPlane") plane: UsdGeom.Mesh = UsdGeom.Mesh.Define(stage, "/World/MyPlane/Plane") # plane.CreatePointsAttr().Set([(-50, 0, -50), (50, 0, -50), (-50, 0, 50), (50, 0, 50)]) # plane.CreateFaceVertexCountsAttr().Set([4]) # plane.CreateFaceVertexIndicesAttr().Set([0, 2, 3, 1]) # plane.CreateExtentAttr().Set([(-50, 0, -50), (50, 0, 50)]) xform_prim = xform.GetPrim() plane_prim = plane.GetPrim() # prim.GetAttribute("primvars:displayColor").Set([(1.0, 1.0, 0.0)]) ################# # Shading Variant ################# variants = { "default": (1.0, 1.0, 0.0), "red": (1.0, 0.0, 0.0), "blue": (0.0, 0.0, 1.0), "green": (0.0, 1.0, 0.0) } shading_varset = xform_prim.GetVariantSets().AddVariantSet("shading") for variant_name in variants: shading_varset.AddVariant(variant_name) # Author opinions in for each variant. You could do this in the previous for loop too. for variant_name in variants: # You must select a variant to author opinion for it. shading_varset.SetVariantSelection(variant_name) with shading_varset.GetVariantEditContext(): # Specs authored within this context are authored just for the variant. plane_prim.GetAttribute("primvars:displayColor").Set([variants[variant_name]]) # Remember to set the variant you want selected once you're done authoring. shading_varset.SetVariantSelection(list(variants.keys())[0]) ################## # Geometry Variant ################## variants = { "default": { "points": [(-50, 0, -50), (50, 0, -50), (-50, 0, 50), (50, 0, 50)], "indices": [0, 2, 3, 1], "counts": [4] }, "sloped": { "points": [(-50, 0, -50), (50, 0, -50), (-50, 20, 50), (50, 20, 50)], "indices": [0, 2, 3, 1], "counts": [4] }, "stacked": { "points": [(-50, 0, -50), (50, 0, -50), (-50, 0, 50), (50, 0, 50), (-50, 10, -50), (-50, 10, 50), (50, 10, 50)], "indices": [0, 2, 3, 1, 4, 5, 6], "counts": [4, 3] } } shading_varset = xform_prim.GetVariantSets().AddVariantSet("geometry") for variant_name in variants: shading_varset.AddVariant(variant_name) # Author opinions in for each variant. You could do this in the previous for loop too. for variant_name in variants: # You must select a variant to author opinion for it. shading_varset.SetVariantSelection(variant_name) with shading_varset.GetVariantEditContext(): # Specs authored within this context are authored just for the variant. plane.CreatePointsAttr().Set(variants[variant_name]["points"]) plane.CreateFaceVertexCountsAttr().Set(variants[variant_name]["counts"]) plane.CreateFaceVertexIndicesAttr().Set(variants[variant_name]["indices"]) # Remember to set the variant you want selected once you're done authoring. shading_varset.SetVariantSelection(list(variants.keys())[0]) ################## # Add Spheres Variant ################## variants = { "default": 1, "two": 2, "three": 3 } shading_varset = xform_prim.GetVariantSets().AddVariantSet("spheres") for variant_name in variants: shading_varset.AddVariant(variant_name) # Author opinions in for each variant. You could do this in the previous for loop too. for variant_name in variants: # You must select a variant to author opinion for it. shading_varset.SetVariantSelection(variant_name) with shading_varset.GetVariantEditContext(): # Specs authored within this context are authored just for the variant. 
for x in range(variants[variant_name]): UsdGeom.Sphere.Define(stage, xform_prim.GetPath().AppendPath(f"Sphere_{x}")) # Remember to set the variant you want selected once you're done authoring. shading_varset.SetVariantSelection(list(variants.keys())[0])
3,934
Python
36.122641
120
0.647687
mati-nvidia/omni-code-with-me/scripts/multi_render_ctx_mtl.py
# SPDX-License-Identifier: Apache-2.0 from pxr import Sdf, UsdShade, Gf import omni.usd stage = omni.usd.get_context().get_stage() mtl_path = Sdf.Path("/World/Looks/OmniSurface") mtl = UsdShade.Material.Define(stage, mtl_path) shader = UsdShade.Shader.Define(stage, mtl_path.AppendPath("Shader")) shader.CreateImplementationSourceAttr(UsdShade.Tokens.sourceAsset) # MDL shaders should use "mdl" sourceType shader.SetSourceAsset("OmniSurface.mdl", "mdl") shader.SetSourceAssetSubIdentifier("OmniSurface", "mdl") shader.CreateInput("diffuse_reflection_color", Sdf.ValueTypeNames.Color3f).Set(Gf.Vec3f(1.0, 0.0, 0.0)) # MDL materials should use "mdl" renderContext mtl.CreateSurfaceOutput("mdl").ConnectToSource(shader.ConnectableAPI(), "out") mtl.CreateDisplacementOutput("mdl").ConnectToSource(shader.ConnectableAPI(), "out") mtl.CreateVolumeOutput("mdl").ConnectToSource(shader.ConnectableAPI(), "out") prvw_shader = UsdShade.Shader.Define(stage, mtl_path.AppendPath("prvw_shader")) prvw_shader.CreateIdAttr("UsdPreviewSurface") prvw_shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Color3f).Set((0.0, 0.0, 1.0)) # Render Context specific to Storm # mtl.CreateSurfaceOutput("glslfx").ConnectToSource(prvw_shader.ConnectableAPI(), "out") # mtl.CreateDisplacementOutput("glslfx").ConnectToSource(prvw_shader.ConnectableAPI(), "out") # mtl.CreateVolumeOutput("glslfx").ConnectToSource(prvw_shader.ConnectableAPI(), "out") # Universal outputs mtl.CreateSurfaceOutput().ConnectToSource(prvw_shader.ConnectableAPI(), "out") mtl.CreateDisplacementOutput().ConnectToSource(prvw_shader.ConnectableAPI(), "out") mtl.CreateVolumeOutput().ConnectToSource(prvw_shader.ConnectableAPI(), "out")
1,690
Python
50.242423
103
0.787574
mati-nvidia/scene-api-sample/README.md
# Scene API Sample Extension

![Scene API Sample Preview](exts/maticodes.scene.sample/data/preview.png)

## Adding This Extension

To add this extension to your Omniverse app:
1. Go into: Extension Manager -> Gear Icon -> Extension Search Path
2. Add this as a search path: `git://github.com/matiascodesal/scene-api-sample.git?branch=main&dir=exts`
349
Markdown
42.749995
104
0.767908
mati-nvidia/scene-api-sample/exts/maticodes.scene.sample/maticodes/scene/sample/manipulators.py
from omni.ui import color as cl from omni.ui import scene as sc class SelectionMarker(sc.Manipulator): """A manipulator that adds a circle with crosshairs above the selected prim.""" def __init__(self, **kwargs): super().__init__(**kwargs) self._radius = 5 self._thickness = 2 self._half_line_length = 10 def on_build(self): if not self.model: return if not self.model.has_selection(): return with sc.Transform(transform=sc.Matrix44.get_translation_matrix(*self.model.position.value)): with sc.Transform(look_at=sc.Transform.LookAt.CAMERA): sc.Arc(self._radius, axis=2, color=cl.yellow) sc.Line([0, -self._half_line_length, 0], [0, self._half_line_length, 0], color=cl.yellow, thickness=self._thickness) sc.Line([-self._half_line_length, 0, 0], [self._half_line_length, 0, 0], color=cl.yellow, thickness=self._thickness) def on_model_updated(self, item): self.invalidate()
1,091
Python
35.399999
100
0.587534
mati-nvidia/scene-api-sample/exts/maticodes.scene.sample/maticodes/scene/sample/extension.py
import omni.ext import omni.kit.commands import omni.ui as ui from omni.ui import scene as sc import omni.usd from .models import CameraModel, SelectionModel from .manipulators import SelectionMarker # Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be # instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled # on_shutdown() is called. class SceneAPISampleExtension(omni.ext.IExt): # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. def on_startup(self, ext_id): print("[maticodes.scene.sample] SceneAPISampleExtension startup") self._window = SampleWindow() def on_shutdown(self): print("[maticodes.scene.sample] SceneAPISampleExtension shutdown") self._window.destroy() self._window = None class SampleWindow(ui.Window): def __init__(self, title: str = None, **kwargs): if title is None: title = "Viewport" if "width" not in kwargs: kwargs["width"] = 1200 if "height" not in kwargs: kwargs["height"] = 480 kwargs["flags"] = ui.WINDOW_FLAGS_NO_SCROLL_WITH_MOUSE | ui.WINDOW_FLAGS_NO_SCROLLBAR super().__init__(title, **kwargs) self.frame.set_build_fn(self.__build_window) def __build_window(self): scene_view = sc.SceneView(model=CameraModel(), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_HORIZONTAL, scene_aspect_ratio=1280 / 720) with scene_view.scene: SelectionMarker(model=SelectionModel())
1,802
Python
36.562499
119
0.660932
mati-nvidia/scene-api-sample/exts/maticodes.scene.sample/maticodes/scene/sample/models.py
from typing import List

import carb
import omni.kit.viewport_legacy as vp
from omni.ui import scene as sc
import omni.usd
from pxr import Gf, Sdf, Tf, Usd, UsdGeom


class SelectionModel(sc.AbstractManipulatorModel):
    """A data model storing the current selected object.

    Tracks selection changes using omni.usd.StageEventType.SELECTION_CHANGED and position changes
    for the selected prim using Tf.Notice.
    """

    class PositionItem(sc.AbstractManipulatorItem):
        def __init__(self):
            super().__init__()
            self.value = [0, 0, 0]

    def __init__(self):
        super().__init__()
        self.position = SelectionModel.PositionItem()
        self._offset = 5
        self._current_path = ""

        usd_context = omni.usd.get_context()
        self._stage: Usd.Stage = usd_context.get_stage()
        if self._stage:
            self._stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._notice_changed, self._stage)

        self._selection = usd_context.get_selection()
        self._events = usd_context.get_stage_event_stream()
        self._stage_event_sub = self._events.create_subscription_to_pop(self._on_stage_event, name="Selection Update")

    def _notice_changed(self, notice, stage):
        """Update model with the selected prim's latest position."""
        for p in notice.GetChangedInfoOnlyPaths():
            if self._current_path in str(p.GetPrimPath()):
                self.position.value = self._get_position()
                self._item_changed(self.position)

    def _on_stage_event(self, event):
        """Update model with the latest selected prim and its position.

        Only tracks the first selected prim. Not multi-select.
        """
        if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            self._current_path = ""
            if not self._stage:
                return

            prim_paths = self._selection.get_selected_prim_paths()
            if not prim_paths:
                self.position.value = [0, 0, 0]
                self._item_changed(self.position)
                return

            prim = self._stage.GetPrimAtPath(prim_paths[0])
            if not prim.IsA(UsdGeom.Imageable):
                self._prim = None
                return

            self._prim = prim
            self._current_path = prim_paths[0]
            self.position.value = self._get_position()
            self._item_changed(self.position)

    def _get_position(self):
        if not self._current_path:
            return [0, 0, 0]

        prim = self._stage.GetPrimAtPath(self._current_path)
        box_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), includedPurposes=[UsdGeom.Tokens.default_])
        bound = box_cache.ComputeWorldBound(prim)
        range_ = bound.ComputeAlignedBox()
        bboxMin = range_.GetMin()
        bboxMax = range_.GetMax()
        position = [(bboxMin[0] + bboxMax[0]) * 0.5, bboxMax[1] + self._offset, (bboxMin[2] + bboxMax[2]) * 0.5]
        return position

    def has_selection(self):
        return self._current_path != ""


class CameraModel(sc.AbstractManipulatorModel):
    def __init__(self):
        super().__init__()
        self._camera_prim = None
        self._camera_path = None
        self._stage_listener = None

        def on_usd_context_event(event: carb.events.IEvent):
            """Register/Re-register Tf.Notice callbacks on UsdContext changes."""
            event_type = event.type
            if event_type == int(omni.usd.StageEventType.OPENED) or event_type == int(omni.usd.StageEventType.CLOSING):
                if self._stage_listener:
                    self._stage_listener.Revoke()
                    self._stage_listener = None
                self._camera_prim = None
                self._camera_path = None
            if event_type == int(omni.usd.StageEventType.OPENED):
                stage = omni.usd.get_context().get_stage()
                # Note: registers for Usd.Notice.ObjectsChanged (the plural form is the actual USD notice type).
                self._stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._notice_changed, stage)
                self._get_camera()

        usd_ctx = omni.usd.get_context()
        self._stage_event_sub = usd_ctx.get_stage_event_stream().create_subscription_to_pop(on_usd_context_event, name="CameraModel stage event")
        stage = usd_ctx.get_stage()
        if stage:
            self._stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._notice_changed, stage)

    def destroy(self):
        self._stage_event_sub = None
        if self._stage_listener:
            self._stage_listener.Revoke()
            self._stage_listener = None
        self._camera_prim = None
        self._camera_path = None
        super().destroy()

    def get_as_floats(self, item):
        if item == self.get_item("projection"):
            return self._get_projection()
        if item == self.get_item("view"):
            return self._get_view()

    def _notice_changed(self, notice, stage):
        for p in notice.GetChangedInfoOnlyPaths():
            if p.GetPrimPath() == self._camera_path:
                self._item_changed(None)

    @staticmethod
    def _flatten(transform):
        """Need to convert Gf.Matrix4d into a list for Scene API. This is the fastest way."""
        return [
            transform[0][0], transform[0][1], transform[0][2], transform[0][3],
            transform[1][0], transform[1][1], transform[1][2], transform[1][3],
            transform[2][0], transform[2][1], transform[2][2], transform[2][3],
            transform[3][0], transform[3][1], transform[3][2], transform[3][3],
        ]

    def _get_camera(self):
        if not self._camera_prim:
            viewport_window = vp.get_default_viewport_window()
            stage = omni.usd.get_context(viewport_window.get_usd_context_name()).get_stage()
            if stage:
                self._camera_path = Sdf.Path(viewport_window.get_active_camera())
                self._camera_prim = stage.GetPrimAtPath(self._camera_path)
        if self._camera_prim:
            return UsdGeom.Camera(self._camera_prim).GetCamera().frustum

    def _get_view(self) -> List[float]:
        frustum = self._get_camera()
        if frustum:
            view = frustum.ComputeViewMatrix()
        else:
            view = Gf.Matrix4d(1.0)
        return self._flatten(view)

    def _get_projection(self) -> List[float]:
        frustum = self._get_camera()
        if frustum:
            projection = frustum.ComputeProjectionMatrix()
        else:
            projection = Gf.Matrix4d(1.0)
        return self._flatten(projection)
6,588
Python
37.086705
145
0.592289
mati-nvidia/scene-api-sample/exts/maticodes.scene.sample/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.1.0"

# The title and description fields are primarily for displaying extension info in UI
title = "Scene API Sample"
description="A sample of how to mark a selected object using the omni.ui.scene API."
authors=["Matias Codesal <mcodesal@nvidia.com>"]
preview_image = "data/preview.png"

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"

# URL of the extension source repository.
repository = "https://github.com/mati-nvidia/scene-api-sample"

# One of categories for UI.
category = "Example"

# Keywords for the extension
keywords = ["kit", "example"]

# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.ui.scene" = {}
"omni.usd" = {}

# Main python module this extension provides, it will be publicly available as "import maticodes.scene.sample".
[[python.module]]
name = "maticodes.scene.sample"
945
TOML
28.562499
105
0.731217
mati-nvidia/omni-bookshelf-generator/README.md
# Bookshelf Generator

This NVIDIA Omniverse Kit extension procedurally creates bookshelves with variable height and width, and it fills the shelves with books. It is part of a series of live coding sessions that I worked on. This is a great project to study if you are interested in learning more about USD, PointInstancers, and Kit extensions.

Watch the recordings of the [full Bookshelf Generator live coding series](https://www.youtube.com/playlist?list=PL3jK4xNnlCVcDS_DgtTSAljdC2KUliU1F).

![Bookshelf Generator](exts/maticodes.generator.bookshelf/data/clip.gif)

## Usage

See the extension's README for [usage instructions](exts/maticodes.generator.bookshelf/docs/README.md).

## App Link Setup

If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from the *Omniverse Launcher*. A convenience script is included. Run:

```
> link_app.bat
```

If successful, you should see an `app` folder link in the root of this repo.

If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:

```
> link_app.bat --app create
```

You can also just pass a path to create the link to:

```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```

## Attribution

Icon made by [Freepik](https://www.flaticon.com/authors/freepik) from [www.flaticon.com](www.flaticon.com)

## Contributing

The source code for this repository is provided as-is and we are not accepting outside contributions.
1,589
Markdown
39.76923
469
0.773442
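The README above calls out PointInstancers as one of the things to study. For readers who have not used them, here is a minimal, self-contained sketch of the pattern the generator relies on (a prototypes relationship plus parallel protoIndices/positions/scales arrays); the stage and prim paths are illustrative assumptions, not taken from the extension.

```python
# Minimal PointInstancer sketch (assumed, illustrative paths; pure USD, no Kit required).
from pxr import Gf, Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
proto = UsdGeom.Cube.Define(stage, "/World/Prototypes/Cube")  # one prototype shape

instancer = UsdGeom.PointInstancer.Define(stage, "/World/Instancer")
instancer.CreatePrototypesRel().SetTargets([proto.GetPath()])

# One entry per instance: which prototype to use, where it goes, and how it is scaled.
instancer.CreateProtoIndicesAttr().Set([0, 0, 0])
instancer.CreatePositionsAttr().Set([Gf.Vec3f(0, 0, 0), Gf.Vec3f(3, 0, 0), Gf.Vec3f(6, 0, 0)])
instancer.CreateScalesAttr().Set([Gf.Vec3f(1, 1, 1), Gf.Vec3f(1, 2, 1), Gf.Vec3f(1, 1, 1)])

print(stage.GetRootLayer().ExportToString())
```

The bookshelf generator follows the same shape: book prototypes under a `Prototypes` scope plus per-book indices, positions, and scales, as seen in `generator.py` later in this listing.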
mati-nvidia/omni-bookshelf-generator/exts/maticodes.generator.bookshelf/maticodes/generator/bookshelf/extension.py
# SPDX-License-Identifier: Apache-2.0

import omni.ext

from .ui import BookshelfGenWindow


# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class BookshelfGeneratorExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        print("[maticodes.generator.bookshelf] BookshelfGeneratorExtension startup")
        self._window = BookshelfGenWindow("Bookshelf Generator", width=300, height=300)

    def on_shutdown(self):
        print("[maticodes.generator.bookshelf] BookshelfGeneratorExtension shutdown")
        self._window.destroy()
        self._window = None
974
Python
37.999998
119
0.727926
mati-nvidia/omni-bookshelf-generator/exts/maticodes.generator.bookshelf/maticodes/generator/bookshelf/utils.py
# SPDX-License-Identifier: Apache-2.0

from pxr import UsdGeom


def stage_up_adjust(stage, values, vec_type):
    # Swap the Y and Z components when the stage is Z-up, so callers can always author
    # values as if "up" were the second component.
    if UsdGeom.GetStageUpAxis(stage) == UsdGeom.Tokens.z:
        return vec_type(values[0], values[2], values[1])
    else:
        return vec_type(*values)
296
Python
23.749998
57
0.689189
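`stage_up_adjust` above quietly reorders components depending on the stage's up axis, which is easy to misread. A small illustrative usage sketch follows; the values and stage are made up for the example, and the import assumes the module path of the file above.

```python
# Illustrative use of stage_up_adjust: author a "75 units up" offset that lands on the
# correct axis for the stage's up axis. Values and stage are made up for this example.
from pxr import Gf, Usd, UsdGeom
from maticodes.generator.bookshelf.utils import stage_up_adjust

stage = Usd.Stage.CreateInMemory()
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)

offset = stage_up_adjust(stage, [0, 75, 0], Gf.Vec3d)
print(offset)  # (0, 0, 75) on this Z-up stage; it would stay (0, 75, 0) on a Y-up stage
```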
mati-nvidia/omni-bookshelf-generator/exts/maticodes.generator.bookshelf/maticodes/generator/bookshelf/generator.py
# SPDX-License-Identifier: Apache-2.0
import random
import typing
import weakref
from pathlib import Path

import carb
import omni.kit.app
import omni.kit.commands
import omni.usd
from omni.kit.property.usd.prim_selection_payload import PrimSelectionPayload
from pxr import Gf, Sdf, Usd, UsdGeom

from .utils import stage_up_adjust

BOOK_A_USD = Path(__file__).parent.parent.parent.parent / "data" / "book_A.usd"
CUBE_POINTS_TEMPLATE = [(-1, -1, -1), (1, -1, -1), (-1, -1, 1), (1, -1, 1), (-1, 1, -1), (1, 1, -1), (1, 1, 1), (-1, 1, 1)]


class BookshelfGenerator:
    def __init__(self, asset_root_path: typing.Union[str, Sdf.Path] = None) -> None:
        self._stage: Usd.Stage = omni.usd.get_context().get_stage()
        if asset_root_path is None:
            self.create_new()
        else:
            if isinstance(asset_root_path, str):
                self._asset_root_path = Sdf.Path(asset_root_path)
            else:
                self._asset_root_path = asset_root_path
            self.from_usd()

        self._stage_subscription = omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(
            self._on_usd_context_event, name="Bookshelf Generator USD Stage Open Listening"
        )

    def _on_usd_context_event(self, event: carb.events.IEvent):
        if event.type == int(omni.usd.StageEventType.OPENED):
            self._stage = omni.usd.get_context().get_stage()

    def from_usd(self):
        prim = self._stage.GetPrimAtPath(self._asset_root_path)
        self.width = prim.GetAttribute("bookshelf_gen:width").Get()
        self.height = prim.GetAttribute("bookshelf_gen:height").Get()
        self.depth = prim.GetAttribute("bookshelf_gen:depth").Get()
        self.thickness = prim.GetAttribute("bookshelf_gen:thickness").Get()
        self.num_shelves = prim.GetAttribute("bookshelf_gen:numShelves").Get()
        self.geom_scope_path = self._asset_root_path.AppendPath("Geometry")
        self.looks_scope_path = self._asset_root_path.AppendPath("Looks")
        instancer_path = self.geom_scope_path.AppendPath("BooksInstancer")
        self.instancer = UsdGeom.PointInstancer(self._stage.GetPrimAtPath(instancer_path))
        look_prim = self._stage.GetPrimAtPath(self.looks_scope_path)
        self.shelf_mtl_path = look_prim.GetChildren()[0].GetPath()

    def create_shelf_material(self, looks_scope_path):
        self.shelf_mtl_path = Sdf.Path(omni.usd.get_stage_next_free_path(self._stage, looks_scope_path.AppendPath("Cherry"), False))
        result = omni.kit.commands.execute('CreateMdlMaterialPrimCommand',
            mtl_url='http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Wood/Cherry.mdl',
            mtl_name='Cherry',
            mtl_path=str(self.shelf_mtl_path))
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[str(self.shelf_mtl_path)],
            expand_in_stage=True)

    @property
    def books_instancer_path(self):
        return self.instancer.GetPath()

    @property
    def asset_root_path(self):
        return self._asset_root_path

    def create_new(self):
        self._asset_root_path = Sdf.Path(omni.usd.get_stage_next_free_path(self._stage, "/World/Bookshelf", False))
        self.geom_scope_path = Sdf.Path(omni.usd.get_stage_next_free_path(self._stage, self._asset_root_path.AppendPath("Geometry"), False))
        self.looks_scope_path = Sdf.Path(omni.usd.get_stage_next_free_path(self._stage, self._asset_root_path.AppendPath("Looks"), False))
        omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=str(self._asset_root_path))
        omni.kit.commands.execute('CreatePrim', prim_type='Scope', prim_path=str(self.geom_scope_path))
        omni.kit.commands.execute('CreatePrim', prim_type='Scope', prim_path=str(self.looks_scope_path))
        self.create_shelf_material(self.looks_scope_path)
        prototypes_container_path = self.geom_scope_path.AppendPath("Prototypes")
        self.width = 150
        self.height = 200
        self.depth = 25
        self.thickness = 2
        self.num_shelves = 3
        self.randomize_scale = True
        self.set_bookshelf_attrs()
        instancer_path = Sdf.Path(omni.usd.get_stage_next_free_path(
            self._stage, self.geom_scope_path.AppendPath("BooksInstancer"), False)
        )
        self.instancer = UsdGeom.PointInstancer.Define(self._stage, instancer_path)
        omni.kit.commands.execute('AddXformOp',
            payload=PrimSelectionPayload(weakref.ref(self._stage), [instancer_path]),
            precision=UsdGeom.XformOp.PrecisionDouble,
            rotation_order='XYZ',
            add_translate_op=True,
            add_rotateXYZ_op=True,
            add_orient_op=False,
            add_scale_op=True,
            add_transform_op=False,
            add_pivot_op=False)
        self.instancer.CreatePositionsAttr().Set([])
        self.instancer.CreateScalesAttr().Set([])
        self.instancer.CreateProtoIndicesAttr().Set([])

    def set_bookshelf_attrs(self):
        asset_root_prim: Usd.Prim = self._stage.GetPrimAtPath(self._asset_root_path)
        asset_root_prim.CreateAttribute("bookshelf_gen:width", Sdf.ValueTypeNames.Float, custom=True).Set(self.width)
        asset_root_prim.CreateAttribute("bookshelf_gen:height", Sdf.ValueTypeNames.Float, custom=True).Set(self.height)
        asset_root_prim.CreateAttribute("bookshelf_gen:depth", Sdf.ValueTypeNames.Float, custom=True).Set(self.depth)
        asset_root_prim.CreateAttribute("bookshelf_gen:thickness", Sdf.ValueTypeNames.Float, custom=True).Set(self.thickness)
        asset_root_prim.CreateAttribute("bookshelf_gen:numShelves", Sdf.ValueTypeNames.Float, custom=True).Set(self.num_shelves)

    def create_default_prototypes(self):
        asset_root_prim: Usd.Prim = self._stage.GetPrimAtPath(self._asset_root_path)
        geom_scope_path = self._asset_root_path.AppendPath("Geometry")
        prototypes: Usd.Prim = self._stage.OverridePrim(geom_scope_path.AppendPath("Prototypes"))
        book_stage: Usd.Stage = Usd.Stage.Open(str(BOOK_A_USD))
        default_prim = book_stage.GetDefaultPrim()
        variants = default_prim.GetVariantSet("color").GetVariantNames()
        paths = []
        for variant in variants:
            book_path = omni.usd.get_stage_next_free_path(self._stage,
                prototypes.GetPath().AppendPath("book_A"), False
            )
            omni.kit.commands.execute('CreateReference',
                path_to=book_path,
                asset_path=str(BOOK_A_USD),
                usd_context=omni.usd.get_context()
            )
            prim = self._stage.GetPrimAtPath(book_path)
            prim.GetVariantSet("color").SetVariantSelection(variant)
            asset_xform = UsdGeom.Xform(prim)
            if UsdGeom.GetStageUpAxis(self._stage) == UsdGeom.Tokens.z:
                rotate_attr = prim.GetAttribute("xformOp:rotateXYZ")
                rotation = rotate_attr.Get()
                rotation[2] = 0
                rotate_attr.Set(rotation)
            asset_xform.SetResetXformStack(True)
            paths.append(book_path)
        prototypes.SetSpecifier(Sdf.SpecifierOver)
        self.instancer.CreatePrototypesRel().SetTargets(paths)

    def get_prototype_attrs(self):
        self.prototype_widths = []
        self.prototype_paths = []
        bbox_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), includedPurposes=[UsdGeom.Tokens.default_])
        self.prototype_paths = self.instancer.GetPrototypesRel().GetForwardedTargets()
        if len(self.prototype_paths) < 1:
            raise ValueError("You must provide at least one prototype.")
        proto_container = self.prototype_paths[0].GetParentPath()
        container_prim = self._stage.GetPrimAtPath(proto_container)
        container_prim.SetSpecifier(Sdf.SpecifierDef)
        for prototype_path in self.prototype_paths:
            proto_prim = self._stage.GetPrimAtPath(prototype_path)
            bbox = bbox_cache.ComputeWorldBound(proto_prim)
            bbox_range = bbox.GetRange()
            bbox_min = bbox_range.GetMin()
            bbox_max = bbox_range.GetMax()
            self.prototype_widths.append(bbox_max[0] - bbox_min[0])
        container_prim.SetSpecifier(Sdf.SpecifierOver)

    def generate(self, width=100, height=250, depth=20, thickness=2, num_shelves=3, randomize_scale=True):
        self.width = width
        self.height = height
        self.depth = depth
        self.thickness = thickness
        self.num_shelves = num_shelves
        self.randomize_scale = randomize_scale
        self.set_bookshelf_attrs()
        self.positions = []
        self.scales = []
        self.proto_ids = []
        self.get_prototype_attrs()
        self.clear_boards()
        self.create_frame()
        self.create_shelves(self.num_shelves)
        omni.usd.get_context().get_selection().clear_selected_prim_paths()

    def clear_boards(self):
        geom_scope_prim: Usd.Prim = self._stage.GetPrimAtPath(self.geom_scope_path)
        boards = []
        for child in geom_scope_prim.GetAllChildren():
            if child.GetName().startswith("Board"):
                boards.append(child.GetPath())
        omni.kit.commands.execute('DeletePrims',
            paths=boards,
            destructive=False
        )

    def create_books(self, shelf_height):
        x = 0

        def get_random_id():
            return random.randint(0, len(self.prototype_paths) - 1)

        id = get_random_id()
        while True:
            if self.randomize_scale:
                width_scalar = random.random() * 1 + 1
                height_scalar = random.random() * 0.5 + 1
            else:
                width_scalar = 1
                height_scalar = 1
            if x + self.prototype_widths[id] * width_scalar > self.width:
                break
            pos = stage_up_adjust(self._stage,
                [x + self.prototype_widths[id] * width_scalar / 2, shelf_height, 0],
                Gf.Vec3f
            )
            self.positions.append(pos)
            scale = stage_up_adjust(self._stage,
                [width_scalar, height_scalar, 1],
                Gf.Vec3d
            )
            self.scales.append(scale)
            self.proto_ids.append(id)
            # Update for next loop
            x += self.prototype_widths[id] * width_scalar
            id = get_random_id()

        self.instancer.GetPositionsAttr().Set(self.positions)
        self.instancer.GetScalesAttr().Set(self.scales)
        self.instancer.GetProtoIndicesAttr().Set(self.proto_ids)

    def create_shelves(self, num_shelves):
        translate_attr = self.instancer.GetPrim().GetAttribute("xformOp:translate")
        translate_attr.Set(stage_up_adjust(self._stage, [-self.width/2, self.thickness/2, 0], Gf.Vec3d))
        # Put books on the bottom of the frame
        self.create_books(self.thickness/2)
        # Generate the other shelves
        if num_shelves > 0:
            offset = self.height / (num_shelves + 1)
            for num in range(1, num_shelves + 1):
                board = self.create_board(self.width)
                shelf_y_pos = num * offset + self.thickness/2
                translate = stage_up_adjust(self._stage, [0, shelf_y_pos, 0], Gf.Vec3d)
                board.GetAttribute("xformOp:translate").Set(translate)
                self.create_books(shelf_y_pos)

    def create_frame(self):
        # bottom
        board = self.create_board(self.width)
        translate = stage_up_adjust(self._stage, [0, self.thickness/2, 0], Gf.Vec3d)
        board.GetAttribute("xformOp:translate").Set(translate)
        # top
        board = self.create_board(self.width)
        translate = stage_up_adjust(self._stage, [0, self.height - self.thickness/2, 0], Gf.Vec3d)
        board.GetAttribute("xformOp:translate").Set(translate)
        # left
        board = self.create_board(self.height)
        translate = stage_up_adjust(self._stage, [-self.width/2 - self.thickness/2, self.height/2, 0], Gf.Vec3d)
        board.GetAttribute("xformOp:translate").Set(translate)
        rotate = stage_up_adjust(self._stage, [0, 0, 90], Gf.Vec3d)
        board.GetAttribute("xformOp:rotateXYZ").Set(rotate)
        # right
        board = self.create_board(self.height)
        translate = stage_up_adjust(self._stage, [self.width/2 + self.thickness/2, self.height/2, 0], Gf.Vec3d)
        board.GetAttribute("xformOp:translate").Set(translate)
        rotate = stage_up_adjust(self._stage, [0, 0, 90], Gf.Vec3d)
        board.GetAttribute("xformOp:rotateXYZ").Set(rotate)

    def create_board(self, width):
        cube_prim_path = omni.usd.get_stage_next_free_path(self._stage, self.geom_scope_path.AppendPath("Board"), False)
        success, result = omni.kit.commands.execute('CreateMeshPrimWithDefaultXform', prim_type='Cube')
        omni.kit.commands.execute('MovePrim', path_from=result, path_to=cube_prim_path)
        result = omni.kit.commands.execute('BindMaterialCommand',
            prim_path=cube_prim_path,
            material_path=str(self.shelf_mtl_path),
            strength='strongerThanDescendants')
        tx_scale_y = self.depth / width
        # shader_prim = self._stage.GetPrimAtPath(mtl_path.AppendPath("Shader"))
        # tx_scale_attr = shader_prim.CreateAttribute("inputs:texture_scale", Sdf.ValueTypeNames.Float2)
        # tx_scale_attr.Set((1.0, tx_scale_y))
        cube_prim = self._stage.GetPrimAtPath(cube_prim_path)
        uv_attr = cube_prim.GetAttribute("primvars:st")
        uvs = uv_attr.Get()
        for x in range(len(uvs)):
            uvs[x] = (uvs[x][0], uvs[x][1] * tx_scale_y)
        uv_attr.Set(uvs)
        points_attr = cube_prim.GetAttribute("points")
        scaled_points = []
        for point in CUBE_POINTS_TEMPLATE:
            x = width / 2 * point[0]
            y = self.thickness / 2 * point[1]
            z = self.depth / 2 * point[2]
            scale = stage_up_adjust(self._stage, [x, y, z], Gf.Vec3d)
            scaled_points.append(scale)
        points_attr.Set(scaled_points)
        return cube_prim
14,328
Python
43.638629
140
0.619417
mati-nvidia/omni-bookshelf-generator/exts/maticodes.generator.bookshelf/maticodes/generator/bookshelf/ui.py
# SPDX-License-Identifier: Apache-2.0
import carb
import omni.ui as ui
import omni.usd
from omni.kit.property.usd.relationship import RelationshipEditWidget

from .generator import BookshelfGenerator


class PrototypesRelEditWidget(RelationshipEditWidget):
    def __init__(self, stage, attr_name, prim_paths):
        kwargs = {
            "on_remove_target": self.on_change_cb,
            "target_picker_on_add_targets": self.on_change_cb
        }
        super().__init__(stage, attr_name, prim_paths, additional_widget_kwargs=kwargs)

    def on_change_cb(self, *args):
        self._set_dirty()


class BookshelfGenWindow(ui.Window):
    def __init__(self, title: str, **kwargs) -> None:
        super().__init__(title, **kwargs)
        self._stage = omni.usd.get_context().get_stage()
        self.bookshelves = []
        self.current_index = -1
        self.current_bookshelf = None
        self.frame.set_build_fn(self.build_frame)
        self._combo_changed_sub = None
        self._stage_subscription = omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(
            self._on_usd_context_event, name="Bookshelf Generator UI USD Stage Open Listening"
        )
        self.randomize_book_model = ui.SimpleBoolModel(True)

    def _on_usd_context_event(self, event: carb.events.IEvent):
        if event.type == int(omni.usd.StageEventType.OPENED):
            self._stage = omni.usd.get_context().get_stage()

    def get_bookshelves(self):
        self.bookshelves = []
        bookshelf_paths = []
        for prim in self._stage.Traverse():
            if prim.HasAttribute("bookshelf_gen:width"):
                self.bookshelves.append(BookshelfGenerator(prim.GetPath()))
                bookshelf_paths.append(prim.GetPath())
        if (self.current_bookshelf is not None and self.current_index < len(self.bookshelves)
                and self.current_bookshelf.asset_root_path == self.bookshelves[self.current_index].asset_root_path):
            return
        self.current_index = -1
        self.current_bookshelf = None
        for i, bookshelf_path in enumerate(bookshelf_paths):
            if (self.current_bookshelf is not None
                    and bookshelf_path == self.current_bookshelf.asset_root_path):
                self.current_index = i
                self.current_bookshelf: BookshelfGenerator = self.bookshelves[self.current_index]

    def build_frame(self):
        self.get_bookshelves()
        with ui.VStack():
            with ui.HStack(height=0):
                combo_model = ui.ComboBox(self.current_index, *[str(x.asset_root_path) for x in self.bookshelves]).model

                def combo_changed(item_model, item):
                    value_model = item_model.get_item_value_model(item)
                    self.current_bookshelf = self.bookshelves[value_model.as_int]
                    self.current_index = value_model.as_int
                    self.reload_frame()
                self._combo_changed_sub = combo_model.subscribe_item_changed_fn(combo_changed)

                def create_new():
                    self.current_bookshelf: BookshelfGenerator = BookshelfGenerator()
                    self.bookshelves.append(self.current_bookshelf)
                    self.current_index = len(self.bookshelves) - 1
                    value_model = combo_model.get_item_value_model()
                    value_model.set_value(self.current_index)
                ui.Button("Create New", width=0, clicked_fn=lambda: create_new())

                def reload_frame():
                    self.reload_frame()
                ui.Button("Reload", width=0, clicked_fn=lambda: reload_frame())
            if self.current_index == -1:
                if len(self.bookshelves) > 0:
                    ui.Label("Create a new bookshelf or select an existing one from the dropdown to get started.",
                        word_wrap=True,
                        alignment=ui.Alignment.CENTER
                    )
                else:
                    ui.Label("Create a new bookshelf to get started.",
                        word_wrap=True,
                        alignment=ui.Alignment.CENTER
                    )
            else:
                def create_default_prototypes():
                    self.current_bookshelf.create_default_prototypes()
                    self.proto_edit_widget.on_change_cb()

                with ui.VStack(height=0):
                    with ui.CollapsableFrame("Prototypes", collapsed=True):
                        with ui.VStack():
                            ui.Button("Create Default Prototypes", height=0, clicked_fn=lambda: create_default_prototypes())
                            with ui.ScrollingFrame(height=200):
                                with ui.VStack():
                                    self.proto_edit_widget = PrototypesRelEditWidget(
                                        self._stage,
                                        "prototypes",
                                        [self.current_bookshelf.books_instancer_path]
                                    )
                                    ui.Spacer()
                    ui.Spacer()
                    ui.Spacer()
                    ui.Spacer()
                    with ui.HStack(height=0, style={"margin_height": 1}):
                        ui.Label("Width (cm): ")
                        self.width_model = ui.SimpleIntModel(int(self.current_bookshelf.width))
                        ui.IntField(model=self.width_model, width=50)
                    with ui.HStack(height=0, style={"margin_height": 1}):
                        ui.Label("Height (cm): ")
                        self.height_model = ui.SimpleIntModel(int(self.current_bookshelf.height))
                        ui.IntField(model=self.height_model, width=50)
                    with ui.HStack(height=0, style={"margin_height": 1}):
                        ui.Label("Depth (cm): ")
                        self.depth_model = ui.SimpleIntModel(int(self.current_bookshelf.depth))
                        ui.IntField(model=self.depth_model, width=50)
                    with ui.HStack(height=0, style={"margin_height": 1}):
                        ui.Label("Thickness (cm): ")
                        self.thickness_model = ui.SimpleIntModel(int(self.current_bookshelf.thickness))
                        ui.IntField(model=self.thickness_model, width=50)
                    with ui.HStack(height=0, style={"margin_height": 1}):
                        ui.Label("Number of Shelves: ")
                        self.num_shelves_model = ui.SimpleIntModel(int(self.current_bookshelf.num_shelves))
                        ui.IntField(model=self.num_shelves_model, width=50)
                    with ui.HStack(height=0, style={"margin_height": 1}):
                        ui.Label("Randomize Book Scale:")
                        ui.CheckBox(model=self.randomize_book_model, width=50)

                    def on_click():
                        self.current_bookshelf.generate(
                            width=self.width_model.as_int,
                            height=self.height_model.as_int,
                            depth=self.depth_model.as_int,
                            thickness=self.thickness_model.as_int,
                            num_shelves=self.num_shelves_model.as_int,
                            randomize_scale=self.randomize_book_model.as_bool
                        )
                    ui.Button("Generate", height=40, clicked_fn=lambda: on_click())

    def reload_frame(self):
        self.frame.rebuild()

    def destroy(self) -> None:
        self._combo_changed_sub = None
        self._stage_subscription.unsubscribe()
        return super().destroy()
7,894
Python
46.848485
124
0.529263
mati-nvidia/omni-bookshelf-generator/exts/maticodes.generator.bookshelf/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"

# The title and description fields are primarily for displaying extension info in UI
title = "Bookshelf Generator"
description="A procedural modeling tool for creating bookshelves with arrayed books."
author = "Matias Codesal"

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"

# preview_image = ""
icon = "data/icon.png"

# Preview to show in the extension manager
preview_image = "data/preview.png"

# URL of the extension source repository.
repository = "https://github.com/mati-nvidia/omni-bookshelf-generator"

# One of categories for UI.
category = "Modeling"

# Keywords for the extension
keywords = ["modeling", "procedural", "pointinstancer", "instancing", "book", "bookshelf", "bookcase"]

# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.usd" = {}

# Main python module this extension provides, it will be publicly available as "import maticodes.generator.bookshelf".
[[python.module]]
name = "maticodes.generator.bookshelf"
1,129
TOML
27.974358
118
0.74225
mati-nvidia/omni-bookshelf-generator/exts/maticodes.generator.bookshelf/docs/CHANGELOG.md
## [Unreleased]

## [1.0.0] - 2022-10-13

### Added
- Initial release of the Bookshelf Generator. Feature complete from the live coding sessions.
144
Markdown
27.999994
93
0.715278
mati-nvidia/omni-bookshelf-generator/exts/maticodes.generator.bookshelf/docs/README.md
# Bookshelf Generator

A procedural modeling tool for creating bookshelves with arrayed books. You can set parameters like height, width, and number of shelves to control the generator. The extension comes with a sample book asset or you can use your own books or props as prototypes for the PointInstancer.

## Usage

1. Press the `Create New` button to create a new bookshelf.
2. Expand the Prototypes collapsible frame and click `Create Default Prototypes`. Optionally, you can provide your own prototypes.
   1. NOTE: Prototype targets should be an Xform and the pivot should be centered on the bottom of the model.
3. Set the rest of the parameters.
4. Click `Generate`. You can change the parameters and click `Generate` again to regenerate.

**NOTE:** Use the combobox to select a different bookshelf in the stage to operate on.

**NOTE:** Use the `Reload` button to refresh the combobox.

## Attribution

Icon made by [Freepik](https://www.flaticon.com/authors/freepik) from [www.flaticon.com](www.flaticon.com)
1,020
Markdown
55.722219
130
0.77549
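The usage steps in the README above map directly onto the `BookshelfGenerator` API shown in `generator.py` earlier in this listing. As a hedged sketch (not an official workflow), the same steps could be driven from the Script Editor inside a Kit app with the extension enabled and a stage open:

```python
# Hedged programmatic equivalent of the README's UI steps; assumes the bookshelf extension
# is enabled in a running Kit app and a stage is open.
from maticodes.generator.bookshelf.generator import BookshelfGenerator

shelf = BookshelfGenerator()        # "Create New": authors a /World/Bookshelf asset
shelf.create_default_prototypes()   # "Create Default Prototypes": references the book_A.usd color variants
shelf.generate(                     # set the parameters, then "Generate"
    width=150, height=200, depth=25, thickness=2,
    num_shelves=3, randomize_scale=True,
)
```

Parameter names and defaults come from `generate()` as defined in `generator.py`; everything else here is illustrative.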