zjowowen committed on
Commit
92d4087
1 Parent(s): fdfb8cd

Upload policy_config.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. policy_config.py +116 -0
policy_config.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Experiment configuration for training MuZero on TicTacToe in
# "play with bot" mode (LightZero-style config layout).
#
# NOTE(review): this file was recovered from a diff-rendered upload; the
# leading '+' markers and interleaved diff line numbers were stripped so the
# module is importable Python again. All keys and values are unchanged.
exp_config = {
    # Runtime configuration: experiment identity, environment and policy
    # hyperparameters, plus logging switches.
    'main_config': {
        'exp_name': 'TicTacToe-play-with-bot-MuZero',
        'seed': 0,
        'env': {
            'env_id': 'TicTacToe-play-with-bot',
            'battle_mode': 'play_with_bot_mode',
            'collector_env_num': 8,    # parallel envs for data collection
            'evaluator_env_num': 5,    # parallel envs for evaluation
            'n_evaluator_episode': 5,
            'manager': {
                'shared_memory': False
            }
        },
        'policy': {
            'on_policy': False,
            'cuda': True,
            'multi_gpu': False,
            'bp_update_sync': True,
            'traj_len_inf': False,
            # Network architecture for the 3x3 board (3-plane observation,
            # 9 discrete actions).
            'model': {
                'observation_shape': [3, 3, 3],
                'action_space_size': 9,
                'image_channel': 3,
                'num_res_blocks': 1,
                'num_channels': 16,
                'fc_reward_layers': [8],
                'fc_value_layers': [8],
                'fc_policy_layers': [8],
                # Categorical value/reward heads: 2 * support_scale + 1 bins.
                'support_scale': 10,
                'reward_support_size': 21,
                'value_support_size': 21,
                'norm_type': 'BN'
            },
            'use_rnd_model': False,
            'sampled_algo': False,
            'gumbel_algo': False,
            'mcts_ctree': True,    # use the C++ MCTS tree implementation
            'collector_env_num': 8,
            'evaluator_env_num': 5,
            'env_type': 'board_games',
            'action_type': 'varied_action_space',
            'battle_mode': 'play_with_bot_mode',
            'monitor_extra_statistics': True,
            'game_segment_length': 5,
            'transform2string': False,
            'gray_scale': False,
            'use_augmentation': False,
            'augmentation': ['shift', 'intensity'],
            'ignore_done': False,
            'update_per_collect': 50,
            'model_update_ratio': 0.1,
            'batch_size': 256,
            'optim_type': 'Adam',
            'learning_rate': 0.003,
            'target_update_freq': 100,
            'target_update_freq_for_intrinsic_reward': 1000,
            'weight_decay': 0.0001,
            'momentum': 0.9,
            'grad_clip_value': 0.5,
            'n_episode': 8,
            'num_simulations': 25,    # MCTS simulations per move
            'discount_factor': 1,     # undiscounted: board game with terminal reward
            'td_steps': 9,
            'num_unroll_steps': 3,
            'reward_loss_weight': 1,
            'value_loss_weight': 0.25,
            'policy_loss_weight': 1,
            'policy_entropy_loss_weight': 0,
            'ssl_loss_weight': 0,
            'lr_piecewise_constant_decay': False,
            'threshold_training_steps_for_final_lr': 50000,
            'manual_temperature_decay': False,
            'threshold_training_steps_for_final_temperature': 100000,
            'fixed_temperature_value': 0.25,
            # NOTE(review): "ture" is a typo, but this is the literal key the
            # consuming framework reads — do not rename without updating it.
            'use_ture_chance_label_in_chance_encoder': False,
            'use_priority': True,
            'priority_prob_alpha': 0.6,
            'priority_prob_beta': 0.4,
            'root_dirichlet_alpha': 0.3,    # exploration noise at the MCTS root
            'root_noise_weight': 0.25,
            'random_collect_episode_num': 0,
            'eps': {
                'eps_greedy_exploration_in_collect': False,
                'type': 'linear',
                'start': 1.0,
                'end': 0.05,
                'decay': 100000
            },
            'cfg_type': 'MuZeroPolicyDict',
            'reanalyze_ratio': 0.0,
            'eval_freq': 2000,
            'replay_buffer_size': 10000
        },
        # All Weights & Biases loggers disabled by default.
        'wandb_logger': {
            'gradient_logger': False,
            'video_logger': False,
            'plot_logger': False,
            'action_logger': False,
            'return_logger': False
        }
    },
    # Registry configuration: tells the framework which env / env-manager /
    # policy classes to import and instantiate.
    'create_config': {
        'env': {
            'type': 'tictactoe',
            'import_names': ['zoo.board_games.tictactoe.envs.tictactoe_env']
        },
        'env_manager': {
            'type': 'subprocess'
        },
        'policy': {
            'type': 'muzero',
            'import_names': ['lzero.policy.muzero']
        }
    }
}