import argparse

import torch

# Assumed remaining imports; the module paths below are hypothetical and
# should match the repo's actual layout:
# from loguru import logger  # logger.success() in the body implies loguru
# from .envs import SnakeGameEnv
# from .agents import DQNAgent, PPOAgent, REINFORCEAgent
# from .experiment import create_experiment
# from .training import Trainer
# from .utils import get_device, set_seed


def train_main():
"""Main training CLI entry point."""
parser = argparse.ArgumentParser(description='Train RL agents')
parser.add_argument('--env', choices=['snake'], default='snake', help='Environment to train on')
parser.add_argument(
'--alg',
choices=['reinforce', 'dqn', 'ppo'],
default='reinforce',
help='Reinforcement learning algorithm',
)
parser.add_argument('--episodes', type=int, default=1000, help='Number of training episodes')
parser.add_argument('--grid-size', type=int, default=15, help='Grid size for snake environment')
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
parser.add_argument('--gamma', type=float, default=0.99, help='Discount factor')
parser.add_argument('--hidden-size', type=int, default=128, help='Hidden layer size')
parser.add_argument('--num-hidden-layers', type=int, default=2, help='Number of hidden layers')
parser.add_argument('--seed', type=int, default=42, help='Random seed')
parser.add_argument('--name', type=str, help='Experiment name (auto-generated if not provided)')
parser.add_argument('--render-interval', type=int, help='Render every N episodes')
parser.add_argument('--save-interval', type=int, help='Save model every N episodes (optional)')
    parser.add_argument(
        '--device', choices=['auto', 'cpu', 'cuda'], default='auto', help='Device to use'
    )
parser.add_argument(
'--epsilon-start', type=float, default=1.0, help='Initial exploration rate (DQN)'
)
parser.add_argument(
'--epsilon-end', type=float, default=0.01, help='Final exploration rate (DQN)'
)
parser.add_argument(
'--epsilon-decay', type=float, default=0.995, help='Exploration decay rate (DQN)'
)
parser.add_argument('--memory-size', type=int, default=10000, help='Replay memory size (DQN)')
parser.add_argument('--batch-size', type=int, default=64, help='Batch size for training')
parser.add_argument(
'--target-update', type=int, default=10, help='Target network update frequency (DQN)'
)
parser.add_argument('--clip-epsilon', type=float, default=0.2, help='PPO clipping parameter')
parser.add_argument('--ppo-epochs', type=int, default=4, help='PPO epochs per update')
parser.add_argument('--gae-lambda', type=float, default=0.95, help='GAE lambda parameter (PPO)')
parser.add_argument(
'--value-loss-coef', type=float, default=0.5, help='Value loss coefficient (PPO)'
)
parser.add_argument(
'--entropy-coef', type=float, default=0.01, help='Entropy coefficient (PPO)'
)
parser.add_argument('--max-grad-norm', type=float, default=0.5, help='Max gradient norm (PPO)')
parser.add_argument('--max-steps', type=int, default=1000, help='Maximum steps per episode')
parser.add_argument('--eval-episodes', type=int, default=10, help='Evaluation episodes')
parser.add_argument(
'--eval-interval', type=int, default=100, help='Evaluation interval during training'
)
parser.add_argument(
'--log-interval', type=int, default=1000, help='Log summary interval during training'
)
parser.add_argument(
'--trajectory-batch',
type=int,
default=8,
help='Number of trajectories to collect per training iteration',
)
parser.add_argument('--no-eval', action='store_true', help='Skip evaluation after training')
parser.add_argument('--quiet', action='store_true', help='Suppress training progress output')
parser.add_argument('--wandb', action='store_true', help='Enable Weights & Biases logging')
args = parser.parse_args()
# Set random seed
set_seed(args.seed)
# Get device
device = get_device() if args.device == 'auto' else torch.device(args.device)
logger.info(f'Using device: {device}')
# Create environment
if args.env == 'snake':
env = SnakeGameEnv(grid_size=args.grid_size)
state_size = args.grid_size * args.grid_size
action_size = 4
else:
raise ValueError(f'Unknown environment: {args.env}')
# Create agent
base_params = {
'state_size': state_size,
'action_size': action_size,
'learning_rate': args.lr,
'gamma': args.gamma,
'hidden_size': args.hidden_size,
'num_hidden_layers': args.num_hidden_layers,
}
if args.alg == 'reinforce':
agent = REINFORCEAgent(**base_params)
elif args.alg == 'dqn':
agent = DQNAgent(
**base_params,
epsilon_start=args.epsilon_start,
epsilon_end=args.epsilon_end,
epsilon_decay=args.epsilon_decay,
memory_size=args.memory_size,
batch_size=args.batch_size,
target_update=args.target_update,
)
elif args.alg == 'ppo':
agent = PPOAgent(
**base_params,
clip_epsilon=args.clip_epsilon,
ppo_epochs=args.ppo_epochs,
lam=args.gae_lambda,
value_loss_coef=args.value_loss_coef,
entropy_coef=args.entropy_coef,
max_grad_norm=args.max_grad_norm,
batch_size=args.batch_size,
)
else:
raise ValueError(f'Unknown algorithm: {args.alg}')
    # Move the agent's networks to the device; which attributes exist depends
    # on the algorithm (policy_network for REINFORCE/PPO, q_network and
    # target_network for DQN, value_network for PPO)
if hasattr(agent, 'policy_network'):
agent.policy_network.to(device)
if hasattr(agent, 'q_network'):
agent.q_network.to(device)
agent.target_network.to(device)
if hasattr(agent, 'value_network'):
agent.value_network.to(device)
logger.info(f'Training {args.alg} agent on {args.env} environment...')
logger.info(f'Episodes: {args.episodes}')
logger.info(f'Grid size: {args.grid_size}')
logger.info(f'Learning rate: {args.lr}')
logger.info(f'Gamma: {args.gamma}')
# Create experiment with automatic directory structure
experiment_name = args.name or f'{args.alg}_{args.env}'
exp_manager = create_experiment(experiment_name)
tb_logs_dir = str(exp_manager.get_tb_logs_path())
# Configure file logging and log command line arguments
exp_manager.configure_file_logging(log_level='INFO')
exp_manager.log_command_line_args(args)
    logger.info(f'TensorBoard logs: {tb_logs_dir}')
# Create trainer and train agent
trainer = Trainer(
env=env,
agent=agent,
max_steps=args.max_steps,
log_dir=tb_logs_dir,
device=device,
enable_wandb=args.wandb,
wandb_config=vars(args),
run_name=experiment_name,
batch_size=args.trajectory_batch,
)
trainer.train(
num_episodes=args.episodes,
render_interval=args.render_interval,
save_interval=args.save_interval,
save_path=str(exp_manager.get_models_path()),
eval_interval=args.eval_interval,
eval_episodes=args.eval_episodes,
log_interval=args.log_interval,
verbose=not args.quiet,
)
# Save final model
models_dir = exp_manager.get_models_path()
final_model_path = str(models_dir / 'final_model.pth')
agent.save(final_model_path)
logger.success(f'Final model saved to {final_model_path}')
logger.success(f'All experiment files saved in: {exp_manager.get_experiment_path()}')
env.close()
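

# A __main__ guard is assumed here; the repo may instead register train_main
# via a console_scripts entry point. Example invocations (the script name is
# hypothetical, the flags are defined above):
#   python train.py --alg dqn --episodes 2000 --epsilon-decay 0.99
#   python train.py --alg ppo --trajectory-batch 16 --wandb
if __name__ == '__main__':
    train_main()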