|
{
  "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
  "featuresDict": {
    "features": {
      "steps": {
        "pythonClassName": "tensorflow_datasets.core.features.dataset_feature.Dataset",
        "sequence": {
          "feature": {
            "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
            "featuresDict": {
              "features": {
                "observation": {
                  "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
                  "featuresDict": {
                    "features": {
                      "robot_obs": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": ["15"]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        },
                        "description": "EE position (3), EE orientation in euler angles (3), gripper width (1), joint positions (7), gripper action (1)"
                      },
"structured_language_instruction": { |
|
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", |
|
"tensor": { |
|
"shape": {}, |
|
"dtype": "string", |
|
"encoding": "none" |
|
}, |
|
"description": "One of 25 possible structured language instructions, see list in https://arxiv.org/pdf/2210.01911.pdf Table 2." |
|
}, |
|
"natural_language_instruction": { |
|
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", |
|
"tensor": { |
|
"shape": {}, |
|
"dtype": "string", |
|
"encoding": "none" |
|
}, |
|
"description": "Natural language instruction is a natural language instruction randomly sampled based on potential task synonyms derived from the structured language task. For example, 'turn blue light off' may map to 'switch the blue color light to off'." |
|
}, |
|
"rgb_static": { |
|
"pythonClassName": "tensorflow_datasets.core.features.image_feature.Image", |
|
"image": { |
|
"shape": { |
|
"dimensions": [ |
|
"256", |
|
"256", |
|
"3" |
|
] |
|
}, |
|
"dtype": "uint8", |
|
"encodingFormat": "jpeg" |
|
}, |
|
"description": "RGB static image of shape. (150, 200, 3). Subsampled from (200,200, 3) image." |
|
                      },
                      "natural_language_embedding": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": ["512"]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      },
                      "rgb_gripper": {
                        "pythonClassName": "tensorflow_datasets.core.features.image_feature.Image",
                        "image": {
                          "shape": {
                            "dimensions": ["256", "256", "3"]
                          },
                          "dtype": "uint8",
                          "encodingFormat": "jpeg"
                        }
                      },
                      "depth_static": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": ["256", "256"]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      },
                      "depth_gripper": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": ["256", "256"]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      }
                    }
                  }
                },
                "is_first": {
                  "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                  "tensor": {
                    "shape": {},
                    "dtype": "bool",
                    "encoding": "none"
                  }
                },
                "reward": {
                  "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
                  "tensor": {
                    "shape": {},
                    "dtype": "float32",
                    "encoding": "none"
                  }
                },
                "is_terminal": {
                  "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                  "tensor": {
                    "shape": {},
                    "dtype": "bool",
                    "encoding": "none"
                  }
                },
                "is_last": {
                  "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                  "tensor": {
                    "shape": {},
                    "dtype": "bool",
                    "encoding": "none"
                  }
                },
                "action": {
                  "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
                  "featuresDict": {
                    "features": {
                      "actions": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": ["7"]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        },
"description": "absolute desired values for gripper pose (first 6 dimensions are x, y, z, yaw, pitch, roll), last dimension is open_gripper (-1 is open gripper, 1 is close)" |
|
                      },
                      "rel_actions_world": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": ["7"]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        },
"description": "relative actions for gripper pose in the robot base frame (first 6 dimensions are x, y, z, yaw, pitch, roll), last dimension is open_gripper (-1 is open gripper, 1 is close)" |
|
                      },
                      "rel_actions_gripper": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": ["7"]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        },
"description": "relative actions for gripper pose in the gripper camera frame (first 6 dimensions are x, y, z, yaw, pitch, roll), last dimension is open_gripper (-1 is open gripper, 1 is close)" |
|
                      },
                      "terminate_episode": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {},
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      }
                    }
                  }
                }
              }
            }
          },
          "length": "-1"
        }
      }
    }
  }
}
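
A minimal usage sketch (not part of the feature spec above): with tensorflow_datasets, an RLDS-style dataset matching this FeaturesDict can be iterated as below. The builder directory path and the "train" split name are placeholders/assumptions; the field accesses simply follow the feature names declared above.

import tensorflow_datasets as tfds

# Placeholder path; point this at the directory holding the dataset's generated
# files (dataset_info.json, features.json, tfrecord shards).
builder = tfds.builder_from_directory(builder_dir="/path/to/dataset/1.0.0")
ds = builder.as_dataset(split="train")  # split name is an assumption

for episode in ds.take(1):
    # "steps" is a Dataset feature, so each episode carries a nested tf.data.Dataset of steps.
    for step in episode["steps"]:
        obs = step["observation"]
        rgb = obs["rgb_static"]                     # uint8 image, (256, 256, 3)
        state = obs["robot_obs"]                    # float32 vector, (15,)
        text = obs["natural_language_instruction"]  # scalar string
        act = step["action"]["actions"]             # float32 vector, (7,): gripper pose + open_gripper
        done = step["is_last"]                      # scalar bool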