File size: 23,498 Bytes
87107d7
 
 
1
2
3
4
xmanager_id: 59470521
xmanager_wid: 1
gin_config: "from __gin__ import dynamic_registration\nfrom gin import config\nfrom google3.robotics.learning.task_explore.tasks.fractal.data.rds.datasets import language_table\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.filters\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.fractal_rds_input_registry as google32\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.model_spec as google33\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.rds_input_builder as google34\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.transforms as google35\nimport google3.robotics.learning.task_explore.tasks.fractal.t2r_models.fractal_critic_model_builder as google36\nimport google3.robotics.learning.task_explore.tasks.fractal.t2r_models.moma_critic_model_builder as google37\nfrom google3.robotics.learning.task_explore.tasks.fractal.tf_agents import action_tokenizer\nimport google3.robotics.learning.task_explore.tasks.fractal.tf_agents.launch.gin_helper as google38\nfrom google3.robotics.learning.task_explore.tasks.fractal.tf_agents import observation_tokenizer\nimport google3.robotics.learning.task_explore.tasks.fractal.tf_agents.sample_clipper as google39\nfrom google3.robotics.learning.task_explore.tasks.mobile_grasping.t2r_models import grasping_44_critic_fat_model_builder\nimport google3.robotics.learning.task_explore.tf_agents.bc0.bc0_actor_net as google310\nfrom google3.robotics.learning.task_explore.tf_agents.bc0 import pretrained_efficientnet_encoder\nimport google3.robotics.learning.task_explore.tf_agents.episode_statistics_visualizations as google311\nimport google3.robotics.learning.task_explore.tf_agents.kuka_e2e_grasping.grasping_net as google312\nimport google3.robotics.learning.task_explore.tf_agents.kuka_e2e_grasping.workspace_clip_policy as google313\nfrom google3.robotics.learning.task_explore.tf_agents.policies import greedy_policy\nimport google3.robotics.learning.task_explore.tf_agents.preprocessors as google314\nfrom google3.robotics.learning.task_explore.tf_agents.sequence import sequence_agent\nfrom google3.robotics.learning.task_explore.tf_agents.sequence import transformer_network\nfrom google3.robotics.learning.task_explore.tf_agents import tf_agents_trainer\nimport google3.robotics.learning.task_explore.utils.gin_string_functions as google315\nfrom robotics_transformer.film_efficientnet import preprocessors\nfrom tensor2robot.preprocessors import image_transformations\nimport tensor2robot.utils.tensorspec_utils\nimport tensorflow as tf\nfrom tf_agents.google.xm import environment_specs\nfrom tf_agents.policies import actor_policy\nimport tf_agents.policies.samplers.qtopt_cem_actions_sampler_continuous_and_one_hot\nfrom tf_agents.train import learner\n\n# Macros:\n# ==============================================================================\nACTION_ORDER = \\\n    [\'terminate_episode\',\n     \'world_vector\',\n     \'rotation_delta\',\n     \'gripper_closedness_action\',\n     \'base_displacement_vertical_rotation\',\n     \'base_displacement_vector\']\nACTOR_NETWORK = @transformer_network.TransformerNetwork\nACTOR_OPTIMIZER = @actor_optimizer/tf.keras.optimizers.Adam()\nBASE_TRANSFORM_NAMES = \\\n    [\'mk1_500tasks_te_real_without_filters\',\n     \'jaco_play_without_filters\',\n     \'bridge_without_filters\',\n     \'berkeley_cable_routing_without_filters\',\n     \'kuka_filters_positive\',\n     \'language_table_kona_without_filters\',\n     \'roboturk_without_filters\',\n     \'viola_without_filters\',\n     \'nyu_door_opening_surprising_effectiveness_without_filters\',\n     \'berkeley_autolab_ur5_without_filters\',\n     \'taco_play_without_filters\',\n     \'toto_without_filters\']\nBATCH_SIZE = 8\nCROP_SIZE = 236\nDATASET_NAMES = \\\n    [\'fractal.mk1_500tasks_te_real\',\n     \'rlds.jaco_play\',\n     \'rlds.bridge\',\n     \'rlds.berkeley_cable_routing\',\n     \'rlds.kuka\',\n     \'language_table_augmented.language_table_kona\',\n     \'rlds.roboturk\',\n     \'rlds.viola\',\n     \'rlds.nyu_door_opening_surprising_effectiveness\',\n     \'rlds.berkeley_autolab_ur5\',\n     \'rlds.taco_play\',\n     \'rlds.toto\']\nDATASET_WEIGHTS = [150, 20, 50, 20, 20, 30, 10, 3, 5, 5, 5, 5]\nDEBUG_SUMMARIES = True\nLEARNING_RATE_ACTOR = 0.0001\nLOG_SUMMARY_INTERVAL = 280\nMAX_TRAINING_STEPS = 45000\nNUM_SHARDS_REVERB = 40\nPOLICY_CHECKPOINT_INTERVAL = 280\nREPLAY_BUFFER_BACKEND = None\nSEQUENCE_LENGTH = 15\nSPLITS = [\'train\']\nTF_AGENT_CLASS = @sequence_agent.SequenceAgent\nTRAIN_CHECKPOINT_INTERVAL = 280\nTRAIN_DIR = \\\n    \'/cns/ib-d/home/quanhovuong-brain/quanhovuong/rs=6.3:sl=48M/ttl=12w:gc=0/xm_fractal_experiment_59470521/1/train\'\nTRAIN_LOG_DIR = \\\n    \'/cns/ib-d/home/quanhovuong-brain/quanhovuong/rs=6.3:sl=48M/ttl=12w:gc=0/xm_fractal_experiment_59470521/1/train\'\nTRAIN_TABLE_NAME = None\nUSE_TCL = False\nVOCAB_SIZE = 512\nWORLD_VECTOR_LIMIT = 2.0\n\n# Parameters for google314._distort_proxy_images:\n# ==============================================================================\ngoogle314._distort_proxy_images.all_augs = False\ngoogle314._distort_proxy_images.use_cutout = False\n\n# Parameters for environment_specs.action_spec:\n# ==============================================================================\n# None.\n\n# Parameters for actor_policy.ActorPolicy:\n# ==============================================================================\nactor_policy.ActorPolicy.clip = True\nactor_policy.ActorPolicy.name = None\nactor_policy.ActorPolicy.observation_and_action_constraint_splitter = None\nactor_policy.ActorPolicy.observation_normalizer = None\nactor_policy.ActorPolicy.policy_state_spec = ()\n\n# Parameters for actor_optimizer/tf.keras.optimizers.Adam:\n# ==============================================================================\nactor_optimizer/tf.keras.optimizers.Adam.amsgrad = False\nactor_optimizer/tf.keras.optimizers.Adam.beta_1 = 0.9\nactor_optimizer/tf.keras.optimizers.Adam.beta_2 = 0.999\nactor_optimizer/tf.keras.optimizers.Adam.clipnorm = None\nactor_optimizer/tf.keras.optimizers.Adam.clipvalue = None\nactor_optimizer/tf.keras.optimizers.Adam.ema_momentum = 0.99\nactor_optimizer/tf.keras.optimizers.Adam.ema_overwrite_frequency = None\nactor_optimizer/tf.keras.optimizers.Adam.epsilon = 1e-07\nactor_optimizer/tf.keras.optimizers.Adam.global_clipnorm = None\nactor_optimizer/tf.keras.optimizers.Adam.jit_compile = True\nactor_optimizer/tf.keras.optimizers.Adam.learning_rate = %LEARNING_RATE_ACTOR\nactor_optimizer/tf.keras.optimizers.Adam.name = \'Adam\'\nactor_optimizer/tf.keras.optimizers.Adam.use_ema = False\nactor_optimizer/tf.keras.optimizers.Adam.weight_decay = None\n\n# Parameters for image_transformations.ApplyPhotometricImageDistortions:\n# ==============================================================================\nimage_transformations.ApplyPhotometricImageDistortions.lower_contrast = 0.5\nimage_transformations.ApplyPhotometricImageDistortions.lower_saturation = 0.5\nimage_transformations.ApplyPhotometricImageDistortions.max_delta_brightness = 0.125\nimage_transformations.ApplyPhotometricImageDistortions.max_delta_hue = 0.2\nimage_transformations.ApplyPhotometricImageDistortions.random_noise_apply_probability = \\\n    0.5\nimage_transformations.ApplyPhotometricImageDistortions.upper_contrast = 1.5\nimage_transformations.ApplyPhotometricImageDistortions.upper_saturation = 1.5\n\n# Parameters for preprocessors.convert_dtype_and_crop_images:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.create_agent_and_specs:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.create_and_mix_rds_datasets:\n# ==============================================================================\ntf_agents_trainer.create_and_mix_rds_datasets.fewer_datasets_than_weights = False\ntf_agents_trainer.create_and_mix_rds_datasets.prefetch = True\ntf_agents_trainer.create_and_mix_rds_datasets.sample_from_datasets = True\n\n# Parameters for google33.create_model_spec:\n# ==============================================================================\n# None.\n\n# Parameters for google34.create_rds_episode_input_pipelines:\n# ==============================================================================\ngoogle34.create_rds_episode_input_pipelines.rds_dataset_names = %DATASET_NAMES\ngoogle34.create_rds_episode_input_pipelines.splits = %SPLITS\ngoogle34.create_rds_episode_input_pipelines.traj_transforms_names = \\\n    @google32.infer_trajectory_transform_names()\n\n# Parameters for google34.create_rds_input_pipeline:\n# ==============================================================================\ngoogle34.create_rds_input_pipeline.episode_ds_pipeline_fn = None\ngoogle34.create_rds_input_pipeline.repeat = True\ngoogle34.create_rds_input_pipeline.split_slice = \'\'\ngoogle34.create_rds_input_pipeline.use_replicated = True\n\n# Parameters for google34.create_rds_input_pipeline_for_registered_trajectory_transform:\n# ==============================================================================\ngoogle34.create_rds_input_pipeline_for_registered_trajectory_transform.allow_read_cached = \\\n    True\n\n# Parameters for google34.create_rds_input_pipelines_for_registered_trajectory_transforms:\n# ==============================================================================\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.episode_shuffle_buffer_size = \\\n    1\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.rds_dataset_names = \\\n    %DATASET_NAMES\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.splits = \\\n    %SPLITS\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.traj_shuffle_buffer_size = \\\n    3328\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.traj_transforms_names = \\\n    @google32.infer_trajectory_transform_names()\n\n# Parameters for google35.create_traj_transform_bc_transformer:\n# ==============================================================================\ngoogle35.create_traj_transform_bc_transformer.spec_name = \'FractalRLDSSpecLean\'\ngoogle35.create_traj_transform_bc_transformer.use_half_transition = True\ngoogle35.create_traj_transform_bc_transformer.wrapper = None\n\n# Parameters for google35.create_traj_transform_bc_transformer_with_filters:\n# ==============================================================================\ngoogle35.create_traj_transform_bc_transformer_with_filters.spec_name = \\\n    \'FractalRLDSSpecLean\'\ngoogle35.create_traj_transform_bc_transformer_with_filters.step_filters = None\ngoogle35.create_traj_transform_bc_transformer_with_filters.use_half_transition = \\\n    True\n\n# Parameters for pretrained_efficientnet_encoder.EfficientNetEncoder:\n# ==============================================================================\npretrained_efficientnet_encoder.EfficientNetEncoder.freeze = False\npretrained_efficientnet_encoder.EfficientNetEncoder.include_top = False\npretrained_efficientnet_encoder.EfficientNetEncoder.model_variant = \'b3\'\npretrained_efficientnet_encoder.EfficientNetEncoder.weights = \'imagenet\'\n\n# Parameters for add_split_to_dataset/google315.elementwise_string_join:\n# ==============================================================================\nadd_split_to_dataset/google315.elementwise_string_join.left = %DATASET_NAMES\nadd_split_to_dataset/google315.elementwise_string_join.right = \\\n    @add_split_to_dataset/google34.infer_split()\nadd_split_to_dataset/google315.elementwise_string_join.separator = \'_\'\n\n# Parameters for add_traj_transform_to_dataset/google315.elementwise_string_join:\n# ==============================================================================\nadd_traj_transform_to_dataset/google315.elementwise_string_join.left = \\\n    @add_split_to_dataset/google315.elementwise_string_join()\nadd_traj_transform_to_dataset/google315.elementwise_string_join.right = \\\n    @google32.infer_trajectory_transform_names()\nadd_traj_transform_to_dataset/google315.elementwise_string_join.separator = \'_\'\n\n# Parameters for google35.episode_to_steps:\n# ==============================================================================\ngoogle35.episode_to_steps.discount_rate = 0.98\ngoogle35.episode_to_steps.experience_sequence_length = 6\ngoogle35.episode_to_steps.learn_terminate_action = False\ngoogle35.episode_to_steps.step_filters = None\ngoogle35.episode_to_steps.use_goal_image = False\n\n# Parameters for action_tokenizer.FractalActionTokenizer:\n# ==============================================================================\n# None.\n\n# Parameters for observation_tokenizer.FractalObservationTokenizer:\n# ==============================================================================\nobservation_tokenizer.FractalObservationTokenizer.image_length = None\nobservation_tokenizer.FractalObservationTokenizer.image_width = None\nobservation_tokenizer.FractalObservationTokenizer.num_context_tokens = 1\nobservation_tokenizer.FractalObservationTokenizer.num_token_per_image = 8\nobservation_tokenizer.FractalObservationTokenizer.prepend_context_to_image = False\n\n# Parameters for google33.get_action_spec:\n# ==============================================================================\ngoogle33.get_action_spec.world_vector_limit = %WORLD_VECTOR_LIMIT\n\n# Parameters for google33.get_additional_state_images:\n# ==============================================================================\n# None.\n\n# Parameters for google33.get_additional_state_robot_states:\n# ==============================================================================\n# None.\n\n# Parameters for google33.get_navigation_spec:\n# ==============================================================================\ngoogle33.get_navigation_spec.base_displacement_vector_limit = 1.0\ngoogle33.get_navigation_spec.base_displacement_vertical_rotation_limit = \\\n    3.141592653589793\ngoogle33.get_navigation_spec.include_terminate = False\ngoogle33.get_navigation_spec.return_action_order_only = False\n\n# Parameters for google33.get_spec:\n# ==============================================================================\n# None.\n\n# Parameters for greedy_policy.GreedyPolicy:\n# ==============================================================================\ngreedy_policy.GreedyPolicy.name = None\n\n# Parameters for google34.infer_split:\n# ==============================================================================\ngoogle34.infer_split.default_split = \'train\'\n\n# Parameters for add_split_to_dataset/google34.infer_split:\n# ==============================================================================\nadd_split_to_dataset/google34.infer_split.default_split = \'train\'\nadd_split_to_dataset/google34.infer_split.rds_dataset_names = %DATASET_NAMES\nadd_split_to_dataset/google34.infer_split.splits = %SPLITS\n\n# Parameters for google32.infer_trajectory_transform_names:\n# ==============================================================================\ngoogle32.infer_trajectory_transform_names.base_transform_names = \\\n    %BASE_TRANSFORM_NAMES\ngoogle32.infer_trajectory_transform_names.experience_sequence_length = \\\n    %SEQUENCE_LENGTH\n\n# Parameters for add_traj_transform_to_dataset/google32.infer_trajectory_transform_names:\n# ==============================================================================\nadd_traj_transform_to_dataset/google32.infer_trajectory_transform_names.base_transform_names = \\\n    %BASE_TRANSFORM_NAMES\nadd_traj_transform_to_dataset/google32.infer_trajectory_transform_names.experience_sequence_length = \\\n    %SEQUENCE_LENGTH\n\n# Parameters for learner.Learner:\n# ==============================================================================\nlearner.Learner.after_train_strategy_step_fn = None\nlearner.Learner.experience_dataset_options = None\nlearner.Learner.max_checkpoints_to_keep = 3\nlearner.Learner.strategy_run_options = None\nlearner.Learner.summary_root_dir = None\nlearner.Learner.triggers = None\nlearner.Learner.use_kwargs_in_agent_train = False\n\n# Parameters for language_table.map_action_real:\n# ==============================================================================\nlanguage_table.map_action_real.world_vector_limit = %WORLD_VECTOR_LIMIT\n\n# Parameters for google314.ProxyPreProcessor:\n# ==============================================================================\ngoogle314.ProxyPreProcessor.image_history_len = None\n\n# Parameters for google3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder:\n# ==============================================================================\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.filter_negative_rewards = \\\n    False\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.filter_small_action_threshold = \\\n    None\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.fractal_dataset_registry_legacy_dataset_name = \\\n    None\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.fractal_project_name_pattern = \\\n    None\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.positive_filter = \\\n    False\n\n# Parameters for sequence_agent.SequenceAgent:\n# ==============================================================================\nsequence_agent.SequenceAgent.action_spec = @environment_specs.action_spec()\nsequence_agent.SequenceAgent.actor_network = %ACTOR_NETWORK\nsequence_agent.SequenceAgent.actor_optimizer = %ACTOR_OPTIMIZER\nsequence_agent.SequenceAgent.debug_summaries = %DEBUG_SUMMARIES\nsequence_agent.SequenceAgent.time_sequence_length = %SEQUENCE_LENGTH\nsequence_agent.SequenceAgent.time_step_spec = @environment_specs.time_step_spec()\nsequence_agent.SequenceAgent.use_tcl = %USE_TCL\n\n# Parameters for google3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.small_actions_step_filter:\n# ==============================================================================\n# None.\n\n# Parameters for add_split_to_dataset/google315.string_join:\n# ==============================================================================\n# None.\n\n# Parameters for add_traj_transform_to_dataset/google315.string_join:\n# ==============================================================================\n# None.\n\n# Parameters for google35.tfa_transition_builder:\n# ==============================================================================\n# None.\n\n# Parameters for environment_specs.time_step_spec:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.train:\n# ==============================================================================\ntf_agents_trainer.train.batch_size = %BATCH_SIZE\ntf_agents_trainer.train.data_spec_create_fn = @google33.create_model_spec\ntf_agents_trainer.train.data_spec_path = \\\n    \'/cns/ib-d/home/quanhovuong-brain/quanhovuong/rs=6.3:sl=48M/ttl=12w:gc=0/xm_fractal_experiment_59470521/1/train/collect_policy/environment_specs.textproto\'\ntf_agents_trainer.train.enable_xla = False\ntf_agents_trainer.train.experience_sequence_length = %SEQUENCE_LENGTH\ntf_agents_trainer.train.flywheel_tick_counter_ctor = None\ntf_agents_trainer.train.in_graph_bellman_update = False\ntf_agents_trainer.train.log_interval = %LOG_SUMMARY_INTERVAL\ntf_agents_trainer.train.num_shards = %NUM_SHARDS_REVERB\ntf_agents_trainer.train.number_training_steps = %MAX_TRAINING_STEPS\ntf_agents_trainer.train.policy_checkpoint_interval = %POLICY_CHECKPOINT_INTERVAL\ntf_agents_trainer.train.pull_buffer_names = \\\n    @add_traj_transform_to_dataset/google315.elementwise_string_join()\ntf_agents_trainer.train.pull_buffer_weights = %DATASET_WEIGHTS\ntf_agents_trainer.train.rds_datasets_create_fn = \\\n    @google34.create_rds_input_pipelines_for_registered_trajectory_transforms()\ntf_agents_trainer.train.rds_episode_datasets_create_fn = \\\n    @google34.create_rds_episode_input_pipelines()\ntf_agents_trainer.train.replay_buffer_backend = %REPLAY_BUFFER_BACKEND\ntf_agents_trainer.train.replay_buffer_name = %TRAIN_TABLE_NAME\ntf_agents_trainer.train.saved_model_policy_wrapper = None\ntf_agents_trainer.train.summary_episode_replay_buffer_names = \\\n    @add_traj_transform_to_dataset/google315.elementwise_string_join()\ntf_agents_trainer.train.summary_interval = %LOG_SUMMARY_INTERVAL\ntf_agents_trainer.train.tf_agent_class = %TF_AGENT_CLASS\ntf_agents_trainer.train.train_checkpoint_interval = %TRAIN_CHECKPOINT_INTERVAL\ntf_agents_trainer.train.train_dir = %TRAIN_DIR\ntf_agents_trainer.train.train_log_dir = %TRAIN_LOG_DIR\ntf_agents_trainer.train.train_summary = False\ntf_agents_trainer.train.train_summary_episode = False\ntf_agents_trainer.train.train_summary_load_latest_ckpt = True\ntf_agents_trainer.train.warm_start_dir = None\n\n# Parameters for transformer_network.TransformerNetwork:\n# ==============================================================================\ntransformer_network.TransformerNetwork.action_order = %ACTION_ORDER\ntransformer_network.TransformerNetwork.action_scales = [%VOCAB_SIZE]\ntransformer_network.TransformerNetwork.continuous_robot_state_features = ()\ntransformer_network.TransformerNetwork.crop_size = %CROP_SIZE\ntransformer_network.TransformerNetwork.dropout_rate = 0.1\ntransformer_network.TransformerNetwork.feed_forward_size = 512\ntransformer_network.TransformerNetwork.image_patch_size = 16\ntransformer_network.TransformerNetwork.image_position_embedding = \\\n    %google3.robotics.learning.task_explore.tasks.fractal.tf_agents.observation_tokenizer.PositionEmbeddingType.NONE\ntransformer_network.TransformerNetwork.image_token_encoder = \\\n    %google3.robotics.learning.task_explore.tasks.fractal.tf_agents.observation_tokenizer.EncoderType.FILM_PRETRAINED_EFFICIENT_NET\ntransformer_network.TransformerNetwork.images_to_use = (\'image\',)\ntransformer_network.TransformerNetwork.include_prev_timesteps_actions = False\ntransformer_network.TransformerNetwork.include_same_timestep_prev_action_dimensions = \\\n    False\ntransformer_network.TransformerNetwork.inference_time_return_discount_factor = 0.98\ntransformer_network.TransformerNetwork.layer_size = 256\ntransformer_network.TransformerNetwork.num_heads = 8\ntransformer_network.TransformerNetwork.num_layers = 8\ntransformer_network.TransformerNetwork.output_tokens_per_frame = None\ntransformer_network.TransformerNetwork.return_attention_scores = False\ntransformer_network.TransformerNetwork.return_optimality_weight = 0.1\ntransformer_network.TransformerNetwork.return_top_percentile = 85\ntransformer_network.TransformerNetwork.return_vocab_size = 128\ntransformer_network.TransformerNetwork.stack_images = False\ntransformer_network.TransformerNetwork.state_features = None\ntransformer_network.TransformerNetwork.tcl_weight = 0.05\ntransformer_network.TransformerNetwork.token_embedding_size = 512\ntransformer_network.TransformerNetwork.use_token_learner = False\ntransformer_network.TransformerNetwork.vocab_size = %VOCAB_SIZE\n"