{
  "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
  "featuresDict": {
    "features": {
      "steps": {
        "pythonClassName": "tensorflow_datasets.core.features.dataset_feature.Dataset",
        "sequence": {
          "feature": {
            "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
            "featuresDict": {
              "features": {
                "observation": {
                  "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
                  "featuresDict": {
                    "features": {
                      "joint_states": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": [
                              "7"
                            ]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        },
                        "description": "joint values"
                      },
                      "gripper_states": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": [
                              "1"
                            ]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        },
                        "description": "gripper_states = 0 means the gripper is fully closed. The value represents the gripper width of the Franka Panda gripper."
                      },
                      "ee_states": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": [
                              "16"
                            ]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        },
                        "description": "Pose of the end effector specified as a homogeneous matrix."
                      },
                      "natural_language_instruction": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {},
                          "dtype": "string",
                          "encoding": "none"
                        }
                      },
                      "eye_in_hand_rgb": {
                        "pythonClassName": "tensorflow_datasets.core.features.image_feature.Image",
                        "image": {
                          "shape": {
                            "dimensions": [
                              "256",
                              "256",
                              "3"
                            ]
                          },
                          "dtype": "uint8",
                          "encodingFormat": "jpeg"
                        },
                        "description": "RGB captured by the in-hand camera"
                      },
                      "agentview_rgb": {
                        "pythonClassName": "tensorflow_datasets.core.features.image_feature.Image",
                        "image": {
                          "shape": {
                            "dimensions": [
                              "256",
                              "256",
                              "3"
                            ]
                          },
                          "dtype": "uint8",
                          "encodingFormat": "jpeg"
                        },
                        "description": "RGB captured by the workspace camera"
                      },
                      "natural_language_embedding": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": [
                              "512"
                            ]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      }
                    }
                  }
                },
                "action": {
                  "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
                  "featuresDict": {
                    "features": {
                      "world_vector": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": [
                              "3"
                            ]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      },
                      "gripper_closedness_action": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {},
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      },
                      "rotation_delta": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {
                            "dimensions": [
                              "3"
                            ]
                          },
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      },
                      "terminate_episode": {
                        "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                        "tensor": {
                          "shape": {},
                          "dtype": "float32",
                          "encoding": "none"
                        }
                      }
                    }
                  }
                },
                "is_first": {
                  "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                  "tensor": {
                    "shape": {},
                    "dtype": "bool",
                    "encoding": "none"
                  }
                },
                "is_terminal": {
                  "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                  "tensor": {
                    "shape": {},
                    "dtype": "bool",
                    "encoding": "none"
                  }
                },
                "is_last": {
                  "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
                  "tensor": {
                    "shape": {},
                    "dtype": "bool",
                    "encoding": "none"
                  }
                },
                "reward": {
                  "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
                  "tensor": {
                    "shape": {},
                    "dtype": "float32",
                    "encoding": "none"
                  }
                }
              }
            }
          },
          "length": "-1"
        }
      }
    }
  }
}
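
The spec above is a TFDS FeaturesDict: each episode carries a nested `steps` dataset whose elements pair an `observation` dict (joint and end-effector proprioception, two 256x256 RGB views, a language instruction and its 512-d embedding) with an `action` dict (Cartesian delta, rotation delta, gripper and termination signals). A minimal sketch of reading episodes that follow this spec is shown below; the dataset directory path and the `train` split name are illustrative assumptions, not part of the spec.

```python
import tensorflow as tf
import tensorflow_datasets as tfds

# Placeholder path (assumption): point this at the directory containing
# this features.json and the accompanying TFRecord shards.
builder = tfds.builder_from_directory("/path/to/dataset_dir")
ds = builder.as_dataset(split="train")  # split name is an assumption

for episode in ds.take(1):
    # "steps" is itself a tf.data.Dataset of per-timestep dicts.
    for step in episode["steps"]:
        obs = step["observation"]
        joints = obs["joint_states"]                     # float32, shape [7]
        gripper = obs["gripper_states"]                  # float32, shape [1]; 0 = fully closed
        ee_pose = tf.reshape(obs["ee_states"], (4, 4))   # 16 floats -> 4x4 homogeneous matrix
        wrist_rgb = obs["eye_in_hand_rgb"]               # uint8, [256, 256, 3]
        scene_rgb = obs["agentview_rgb"]                 # uint8, [256, 256, 3]
        action = step["action"]
        delta_xyz = action["world_vector"]               # float32, shape [3]
        delta_rot = action["rotation_delta"]             # float32, shape [3]
```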