Merge pull request DLR-RM#1 from Antonin-Raffin/feat/a2c
Add A2C algorithm
araffin authored and GitHub Enterprise committed Oct 28, 2019
2 parents 3bc746c + df1e7aa commit 701daa8
Showing 15 changed files with 274 additions and 29 deletions.
8 changes: 3 additions & 5 deletions README.md
@@ -8,6 +8,7 @@ PyTorch version of [Stable Baselines](https://github.com/hill-a/stable-baselines

## Implemented Algorithms

- A2C
- CEM-RL (with TD3)
- PPO
- SAC
@@ -18,11 +19,8 @@ PyTorch version of [Stable Baselines](https://github.com/hill-a/stable-baselines

TODO:
- save/load
- predict
- flexible mlp
- logger
- better monitor wrapper?
- A2C
- better predict
- complete logger

Later:
- get_parameters / set_parameters
7 changes: 4 additions & 3 deletions tests/test_run.py
@@ -3,7 +3,7 @@
import pytest
import numpy as np

from torchy_baselines import TD3, CEMRL, PPO, SAC
from torchy_baselines import A2C, CEMRL, PPO, SAC, TD3
from torchy_baselines.common.noise import NormalActionNoise


@@ -28,9 +28,10 @@ def test_cemrl():
os.remove("test_save.pth")


@pytest.mark.parametrize("model_class", [A2C, PPO])
@pytest.mark.parametrize("env_id", ['CartPole-v1', 'Pendulum-v0'])
def test_ppo(env_id):
model = PPO('MlpPolicy', env_id, policy_kwargs=dict(net_arch=[16]), verbose=1, create_eval_env=True)
def test_onpolicy(model_class, env_id):
model = model_class('MlpPolicy', env_id, policy_kwargs=dict(net_arch=[16]), verbose=1, create_eval_env=True)
model.learn(total_timesteps=1000, eval_freq=500)
# model.save("test_save")
# model.load("test_save")
3 changes: 2 additions & 1 deletion torchy_baselines/__init__.py
@@ -1,6 +1,7 @@
from torchy_baselines.a2c import A2C
from torchy_baselines.cem_rl import CEMRL
from torchy_baselines.ppo import PPO
from torchy_baselines.sac import SAC
from torchy_baselines.td3 import TD3

__version__ = "0.0.4"
__version__ = "0.0.5a"
2 changes: 2 additions & 0 deletions torchy_baselines/a2c/__init__.py
@@ -0,0 +1,2 @@
from torchy_baselines.a2c.a2c import A2C
from torchy_baselines.ppo.policies import MlpPolicy
126 changes: 126 additions & 0 deletions torchy_baselines/a2c/a2c.py
@@ -0,0 +1,126 @@
from gym import spaces
import torch as th
import torch.nn.functional as F

from torchy_baselines.common.utils import explained_variance
from torchy_baselines.ppo.ppo import PPO
from torchy_baselines.ppo.policies import PPOPolicy


class A2C(PPO):
"""
Advantage Actor Critic (A2C)
Paper: https://arxiv.org/abs/1602.01783
Code: This implementation borrows code from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail
and Stable Baselines (https://github.com/hill-a/stable-baselines)
Introduction to A2C: https://hackernoon.com/intuitive-rl-intro-to-advantage-actor-critic-a2c-4ff545978752
:param policy: (PPOPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param learning_rate: (float or callable) The learning rate, it can be a function
:param n_steps: (int) The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param gamma: (float) Discount factor
:param gae_lambda: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param ent_coef: (float) Entropy coefficient for the loss calculation
:param vf_coef: (float) Value function coefficient for the loss calculation
:param max_grad_norm: (float) The maximum value for the gradient clipping
:param rms_prop_eps: (float) RMSProp epsilon. It stabilizes the square root computation in the denominator
of the RMSProp update
:param use_rms_prop: (bool) Whether to use RMSprop (default) or Adam as optimizer
:param normalize_advantage: (bool) Whether or not to normalize the advantage
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param create_eval_env: (bool) Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing a string for the environment)
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 debug
:param seed: (int) Seed for the pseudo random generators
:param device: (str or th.device) Device (cpu, cuda, ...) on which the code should be run.
When set to 'auto', the code will run on the GPU if possible.
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
"""

def __init__(self, policy, env, learning_rate=7e-4,
n_steps=5, gamma=0.99, gae_lambda=1.0,
ent_coef=0.0, vf_coef=0.5, max_grad_norm=0.5,
rms_prop_eps=1e-5, use_rms_prop=True,
normalize_advantage=False, tensorboard_log=None, create_eval_env=False,
policy_kwargs=None, verbose=0, seed=0, device='auto',
_init_setup_model=True):

super(A2C, self).__init__(policy, env, learning_rate=learning_rate,
n_steps=n_steps, batch_size=None, n_epochs=1,
gamma=gamma, gae_lambda=gae_lambda, ent_coef=ent_coef,
vf_coef=vf_coef, max_grad_norm=max_grad_norm,
tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs,
verbose=verbose, device=device, create_eval_env=create_eval_env,
seed=seed, _init_setup_model=False)

self.normalize_advantage = normalize_advantage
self.rms_prop_eps = rms_prop_eps
self.use_rms_prop = use_rms_prop

if _init_setup_model:
self._setup_model()

def _setup_model(self):
super(A2C, self)._setup_model()
if self.use_rms_prop:
self.policy.optimizer = th.optim.RMSprop(self.policy.parameters(),
lr=self.learning_rate(1), alpha=0.99,
eps=self.rms_prop_eps, weight_decay=0)

def train(self, gradient_steps, batch_size=None):

# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# A2C with gradient_steps > 1 does not make sense
assert gradient_steps == 1
# We do not use minibatches for A2C
assert batch_size is None

for rollout_data in self.rollout_buffer.get(batch_size=None):
# Unpack
obs, action, _, _, advantage, return_batch = rollout_data

if isinstance(self.action_space, spaces.Discrete):
# Convert discrete actions from float to long
action = action.long().flatten()

# TODO: avoid second computation of everything because of the gradient
values, log_prob, entropy = self.policy.get_policy_stats(obs, action)
values = values.flatten()

# Normalize advantage (not present in the original implementation)
if self.normalize_advantage:
advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-8)

policy_loss = -(advantage * log_prob).mean()

# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(return_batch, values)

# Entropy loss favors exploration
entropy_loss = -th.mean(entropy)

loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss

# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
# approx_kl_divs.append(th.mean(old_log_prob - log_prob).detach().cpu().numpy())

# print(explained_variance(self.rollout_buffer.returns.flatten().cpu().numpy(),
# self.rollout_buffer.values.flatten().cpu().numpy()))

def learn(self, total_timesteps, callback=None, log_interval=100,
eval_env=None, eval_freq=-1, n_eval_episodes=5, tb_log_name="A2C", reset_num_timesteps=True):

return super(A2C, self).learn(total_timesteps=total_timesteps, callback=callback, log_interval=log_interval,
eval_env=eval_env, eval_freq=eval_freq, n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name, reset_num_timesteps=reset_num_timesteps)
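
For reference, a minimal usage sketch of the new class (it mirrors the parametrized test in tests/test_run.py above; the hyperparameters are illustrative, not tuned):

    from torchy_baselines import A2C

    # Train A2C on CartPole with a tiny network, mirroring tests/test_run.py
    model = A2C('MlpPolicy', 'CartPole-v1',
                policy_kwargs=dict(net_arch=[16]),
                verbose=1, create_eval_env=True)
    model.learn(total_timesteps=1000, eval_freq=500)
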
3 changes: 2 additions & 1 deletion torchy_baselines/cem_rl/cem_rl.py
@@ -78,7 +78,7 @@ def learn(self, total_timesteps, callback=None, log_interval=4,
# set params
self.actor.load_from_vector(self.es_params[i])
self.actor_target.load_from_vector(self.es_params[i])
self.actor.optimizer = th.optim.Adam(self.actor.parameters(), lr=self.learning_rate)
self.actor.optimizer = th.optim.Adam(self.actor.parameters(), lr=self.learning_rate(self._current_progress))

# In the paper: 2 * actor_steps // self.n_grad
# In the original implementation: actor_steps // self.n_grad
@@ -153,6 +153,7 @@ def learn(self, total_timesteps, callback=None, log_interval=4,
print("Total T: {} Episode Num: {} Episode T: {} Reward: {}".format(
self.num_timesteps, episode_num, episode_timesteps, episode_reward))

self._update_current_progress(self.num_timesteps, total_timesteps)
self.es.tell(self.es_params, self.fitnesses)
timesteps_since_eval += actor_steps
return self
34 changes: 33 additions & 1 deletion torchy_baselines/common/base_class.py
@@ -7,7 +7,7 @@
import numpy as np

from torchy_baselines.common.policies import get_policy_from_name
from torchy_baselines.common.utils import set_random_seed
from torchy_baselines.common.utils import set_random_seed, get_schedule_fn, update_learning_rate
from torchy_baselines.common.vec_env import DummyVecEnv, VecEnv
from torchy_baselines.common.monitor import Monitor
from torchy_baselines.common import logger
@@ -57,6 +57,9 @@ def __init__(self, policy, env, policy_base, policy_kwargs=None,
self.replay_buffer = None
self.seed = seed
self.action_noise = None
# Track the training progress (from 1 to 0)
# this is used to update the learning rate
self._current_progress = 1

if env is not None:
if isinstance(env, str):
@@ -112,6 +115,35 @@ def unscale_action(self, scaled_action):
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))

def _setup_learning_rate(self):
"""Transform to callable if needed."""
self.learning_rate = get_schedule_fn(self.learning_rate)

def _update_current_progress(self, num_timesteps, total_timesteps):
"""
Compute current progress (from 1 to 0)
:param num_timesteps: (int)
:param total_timesteps: (int)
"""
self._current_progress = 1.0 - float(num_timesteps) / float(total_timesteps)

def _update_learning_rate(self, optimizers):
"""
Update the optimizers' learning rate using the current learning rate schedule
and the current progress (from 1 to 0).
:param optimizers: ([th.optim.Optimizer] or Optimizer) An optimizer
or a list of optimizers.
"""
# Log the current learning rate
logger.logkv("learning_rate", self.learning_rate(self._current_progress))

if not isinstance(optimizers, list):
optimizers = [optimizers]
for optimizer in optimizers:
update_learning_rate(optimizer, self.learning_rate(self._current_progress))

@staticmethod
def safe_mean(arr):
"""
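
To illustrate how this progress value is consumed: _update_learning_rate calls self.learning_rate(self._current_progress), so any callable mapping progress (1 -> 0) to a value can act as a schedule. A minimal sketch, assuming a hypothetical linear_schedule helper that is not part of this commit:

    def linear_schedule(initial_value):
        """Return a schedule mapping progress (1 -> 0) to a linearly decayed value."""
        def func(progress):
            # progress starts at 1.0 and decreases towards 0.0 during training
            return progress * initial_value
        return func

    # e.g. A2C('MlpPolicy', 'CartPole-v1', learning_rate=linear_schedule(7e-4))
    # would decay the learning rate linearly from 7e-4 to 0 over training.
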
7 changes: 6 additions & 1 deletion torchy_baselines/common/buffers.py
@@ -134,6 +134,7 @@ def add(self, obs, action, reward, done, value, log_prob):
if len(log_prob.shape) == 0:
# Reshape 0-d tensor to avoid error
log_prob = log_prob.reshape(-1, 1)

self.observations[self.pos] = th.FloatTensor(np.array(obs).copy())
self.actions[self.pos] = th.FloatTensor(np.array(action).copy())
self.rewards[self.pos] = th.FloatTensor(np.array(reward).copy())
@@ -144,7 +145,7 @@ def add(self, obs, action, reward, done, value, log_prob):
if self.pos == self.buffer_size:
self.full = True

def get(self, batch_size):
def get(self, batch_size=None):
assert self.full
indices = th.randperm(self.buffer_size * self.n_envs)
# Prepare the data
@@ -154,6 +155,10 @@ def get(self, batch_size):
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
self.generator_ready = True

# Return everything, don't create minibatches
if batch_size is None:
batch_size = self.buffer_size * self.n_envs

start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
yield self._get_samples(indices[start_idx:start_idx + batch_size])
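
The new default behaviour can be seen in isolation with a standalone sketch of the chunking logic (not the buffer class itself; the buffer_size and n_envs values are arbitrary):

    import torch as th

    buffer_size, n_envs = 5, 4              # e.g. A2C default n_steps=5
    total = buffer_size * n_envs
    indices = th.randperm(total)

    def iterate(batch_size=None):
        # batch_size=None -> a single batch containing the whole rollout (A2C)
        if batch_size is None:
            batch_size = total
        start_idx = 0
        while start_idx < total:
            yield indices[start_idx:start_idx + batch_size]
            start_idx += batch_size

    print(len(list(iterate())))    # 1 full batch, as used by A2C
    print(len(list(iterate(5))))   # 4 minibatches, PPO-style
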
45 changes: 45 additions & 0 deletions torchy_baselines/common/utils.py
@@ -38,3 +38,48 @@ def explained_variance(y_pred, y_true):
assert y_true.ndim == 1 and y_pred.ndim == 1
var_y = np.var(y_true)
return np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y


def update_learning_rate(optimizer, learning_rate):
"""
Update the learning rate for a given optimizer.
Useful when doing linear schedule.
:param optimizer: (th.optim.Optimizer)
:param learning_rate: (float)
"""
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate


def get_schedule_fn(value_schedule):
"""
Transform (if needed) the learning rate and clip range (for PPO)
to a callable.
:param value_schedule: (callable or float)
:return: (function)
"""
# If the passed schedule is a float
# create a constant function
if isinstance(value_schedule, (float, int)):
# Cast to float to avoid errors
value_schedule = constant_fn(float(value_schedule))
else:
assert callable(value_schedule)
return value_schedule


def constant_fn(val):
"""
Create a function that returns a constant value.
It is useful for learning rate schedules (to avoid code duplication).
:param val: (float)
:return: (function)
"""

def func(_):
return val

return func
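
A quick illustrative check of these two helpers (a sketch; the values are arbitrary):

    from torchy_baselines.common.utils import get_schedule_fn

    lr_schedule = get_schedule_fn(7e-4)     # a float becomes a constant callable
    assert lr_schedule(1.0) == lr_schedule(0.0) == 7e-4

    decay = get_schedule_fn(lambda progress: 7e-4 * progress)  # callables pass through
    assert decay(1.0) == 7e-4 and decay(0.0) == 0.0
    # This is why PPOPolicy below now calls learning_rate(1) to get the initial value.
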
5 changes: 2 additions & 3 deletions torchy_baselines/ppo/policies.py
@@ -100,7 +100,7 @@ def forward(self, features):

class PPOPolicy(BasePolicy):
def __init__(self, observation_space, action_space,
learning_rate=1e-3, net_arch=None, device='cpu',
learning_rate, net_arch=None, device='cpu',
activation_fn=nn.Tanh, adam_epsilon=1e-5, ortho_init=True):
super(PPOPolicy, self).__init__(observation_space, action_space, device)
self.obs_dim = self.observation_space.shape[0]
@@ -148,8 +148,7 @@ def _build(self, learning_rate):
self.value_net: 1
}[module]
module.apply(partial(self.init_weights, gain=gain))
# TODO: support linear decay of the learning rate
self.optimizer = th.optim.Adam(self.parameters(), lr=learning_rate, eps=self.adam_epsilon)
self.optimizer = th.optim.Adam(self.parameters(), lr=learning_rate(1), eps=self.adam_epsilon)

def forward(self, obs, deterministic=False):
if not isinstance(obs, th.Tensor):