Merge pull request #334 from DeNA/develop
(2022/11-12) merge develop into master
YuriCat committed Feb 26, 2023
2 parents 8fb63a3 + cf2ef02 commit 6bdcd87
Showing 3 changed files with 35 additions and 20 deletions.
27 changes: 16 additions & 11 deletions handyrl/losses.py
@@ -17,38 +17,40 @@ def monte_carlo(values, returns):
     return returns, returns - values


-def temporal_difference(values, returns, rewards, lmb, gamma):
+def temporal_difference(values, returns, rewards, lambda_, gamma):
     target_values = deque([returns[:, -1]])
     for i in range(values.size(1) - 2, -1, -1):
         reward = rewards[:, i] if rewards is not None else 0
-        target_values.appendleft(reward + gamma * ((1 - lmb) * values[:, i + 1] + lmb * target_values[0]))
+        lamb = lambda_[:, i + 1]
+        target_values.appendleft(reward + gamma * ((1 - lamb) * values[:, i + 1] + lamb * target_values[0]))

     target_values = torch.stack(tuple(target_values), dim=1)

     return target_values, target_values - values


-def upgo(values, returns, rewards, lmb, gamma):
+def upgo(values, returns, rewards, lambda_, gamma):
     target_values = deque([returns[:, -1]])
     for i in range(values.size(1) - 2, -1, -1):
         value = values[:, i + 1]
         reward = rewards[:, i] if rewards is not None else 0
-        target_values.appendleft(reward + gamma * torch.max(value, (1 - lmb) * value + lmb * target_values[0]))
+        lamb = lambda_[:, i + 1]
+        target_values.appendleft(reward + gamma * torch.max(value, (1 - lamb) * value + lamb * target_values[0]))

     target_values = torch.stack(tuple(target_values), dim=1)

     return target_values, target_values - values


-def vtrace(values, returns, rewards, lmb, gamma, rhos, cs):
+def vtrace(values, returns, rewards, lambda_, gamma, rhos, cs):
     rewards = rewards if rewards is not None else 0
     values_t_plus_1 = torch.cat([values[:, 1:], returns[:, -1:]], dim=1)
     deltas = rhos * (rewards + gamma * values_t_plus_1 - values)

     # compute Vtrace value target recursively
     vs_minus_v_xs = deque([deltas[:, -1]])
     for i in range(values.size(1) - 2, -1, -1):
-        vs_minus_v_xs.appendleft(deltas[:, i] + gamma * lmb * cs[:, i] * vs_minus_v_xs[0])
+        vs_minus_v_xs.appendleft(deltas[:, i] + gamma * lambda_[:, i + 1] * cs[:, i] * vs_minus_v_xs[0])

     vs_minus_v_xs = torch.stack(tuple(vs_minus_v_xs), dim=1)
     vs = vs_minus_v_xs + values
@@ -58,18 +60,21 @@ def vtrace(values, returns, rewards, lmb, gamma, rhos, cs):
     return vs, advantages


-def compute_target(algorithm, values, returns, rewards, lmb, gamma, rhos, cs):
+def compute_target(algorithm, values, returns, rewards, lmb, gamma, rhos, cs, masks):
     if values is None:
         # In the absence of a baseline, Monte Carlo returns are used.
         return returns, returns

     if algorithm == 'MC':
         return monte_carlo(values, returns)
-    elif algorithm == 'TD':
-        return temporal_difference(values, returns, rewards, lmb, gamma)
+
+    lambda_ = lmb + (1 - lmb) * (1 - masks)
+
+    if algorithm == 'TD':
+        return temporal_difference(values, returns, rewards, lambda_, gamma)
     elif algorithm == 'UPGO':
-        return upgo(values, returns, rewards, lmb, gamma)
+        return upgo(values, returns, rewards, lambda_, gamma)
     elif algorithm == 'VTRACE':
-        return vtrace(values, returns, rewards, lmb, gamma, rhos, cs)
+        return vtrace(values, returns, rewards, lambda_, gamma, rhos, cs)
     else:
         print('No algorithm named %s' % algorithm)
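Note on the new masks argument to compute_target: it is folded into a per-step lambda before the TD / UPGO / V-trace recursions run. A minimal sketch of that arithmetic, with a made-up 0/1 mask tensor purely for illustration:

import torch

lmb = 0.7                                   # configured lambda (args['lambda'])
masks = torch.tensor([[1., 0., 1., 1.]])    # hypothetical 0/1 observation mask for one sequence
lambda_ = lmb + (1 - lmb) * (1 - masks)
print(lambda_)                              # tensor([[0.7000, 1.0000, 0.7000, 0.7000]])
# Where the mask is 1 the configured lambda is kept; where it is 0 the effective
# lambda becomes 1, so the recursion carries the target straight through that
# step instead of bootstrapping on its unobserved value estimate.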
23 changes: 16 additions & 7 deletions handyrl/train.py
@@ -224,6 +224,9 @@ def compute_loss(batch, model, hidden, args):

     actions = batch['action']
     emasks = batch['episode_mask']
+    omasks = batch['observation_mask']
+    value_target_masks, return_target_masks = omasks, omasks
+
     clip_rho_threshold, clip_c_threshold = 1.0, 1.0

     log_selected_b_policies = torch.log(torch.clamp(batch['selected_prob'], 1e-16, 1)) * emasks
@@ -239,16 +242,18 @@
     if 'value' in outputs_nograd:
         values_nograd = outputs_nograd['value']
         if args['turn_based_training'] and values_nograd.size(2) == 2:  # two player zerosum game
-            values_nograd_opponent = -torch.stack([values_nograd[:, :, 1], values_nograd[:, :, 0]], dim=2)
-            values_nograd = (values_nograd + values_nograd_opponent) / (batch['observation_mask'].sum(dim=2, keepdim=True) + 1e-8)
+            values_nograd_opponent = -torch.flip(values_nograd, dims=[2])
+            omasks_opponent = torch.flip(omasks, dims=[2])
+            values_nograd = (values_nograd * omasks + values_nograd_opponent * omasks_opponent) / (omasks + omasks_opponent + 1e-8)
+            value_target_masks = torch.clamp(omasks + omasks_opponent, 0, 1)
         outputs_nograd['value'] = values_nograd * emasks + batch['outcome'] * (1 - emasks)

     # compute targets and advantage
     targets = {}
     advantages = {}

-    value_args = outputs_nograd.get('value', None), batch['outcome'], None, args['lambda'], 1, clipped_rhos, cs
-    return_args = outputs_nograd.get('return', None), batch['return'], batch['reward'], args['lambda'], args['gamma'], clipped_rhos, cs
+    value_args = outputs_nograd.get('value', None), batch['outcome'], None, args['lambda'], 1, clipped_rhos, cs, value_target_masks
+    return_args = outputs_nograd.get('return', None), batch['return'], batch['reward'], args['lambda'], args['gamma'], clipped_rhos, cs, return_target_masks

     targets['value'], advantages['value'] = compute_target(args['value_target'], *value_args)
     targets['return'], advantages['return'] = compute_target(args['value_target'], *return_args)
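For the turn-based two-player branch, the value symmetrization now builds the opponent view with torch.flip over the player axis and weights each view by its observation mask. A rough standalone sketch on a toy [batch, time, player] tensor (shapes and numbers are assumptions, not taken from the repository):

import torch

values = torch.tensor([[[0.8, 0.0],
                        [0.0, -0.6]]])          # hypothetical estimates, shape [1, 2, 2]
omasks = torch.tensor([[[1., 0.],
                        [0., 1.]]])             # only the acting player observes each turn

values_opponent = -torch.flip(values, dims=[2])  # zero-sum: negate and swap the player axis
omasks_opponent = torch.flip(omasks, dims=[2])

values_sym = (values * omasks + values_opponent * omasks_opponent) / (omasks + omasks_opponent + 1e-8)
print(values_sym)   # roughly [[[0.8, -0.8], [0.6, -0.6]]]: a consistent zero-sum pair per turn
target_masks = torch.clamp(omasks + omasks_opponent, 0, 1)   # every slot now holds a usable target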
@@ -289,9 +294,13 @@ def select_episode(self):
             ep_count = min(len(self.episodes), self.args['maximum_episodes'])
             ep_idx = random.randrange(ep_count)
             accept_rate = 1 - (ep_count - 1 - ep_idx) / ep_count
-            if random.random() < accept_rate:
+            if random.random() >= accept_rate:
+                continue
+            try:
+                ep = self.episodes[ep_idx]
                 break
-        ep = self.episodes[ep_idx]
+            except IndexError:
+                continue
         turn_candidates = 1 + max(0, ep['steps'] - self.args['forward_steps'])  # change start turn by sequence length
         train_st = random.randrange(turn_candidates)
         st = max(0, train_st - self.args['burn_in_steps'])
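The select_episode change rewrites the accept-and-break pattern as reject-and-continue and guards the lookup with try/except, presumably so a buffer trimmed concurrently cannot raise out of the sampler. The acceptance rule itself is unchanged; a standalone sketch of the scheme, with the function name and container made up for illustration:

import random

def sample_recency_biased(episodes):
    # Keep index ep_idx with probability (ep_idx + 1) / ep_count, i.e. newer
    # episodes are sampled more often, with linearly increasing weight.
    while True:
        ep_count = len(episodes)
        ep_idx = random.randrange(ep_count)
        accept_rate = 1 - (ep_count - 1 - ep_idx) / ep_count   # equals (ep_idx + 1) / ep_count
        if random.random() >= accept_rate:
            continue                       # rejected: draw another index
        try:
            return episodes[ep_idx]        # may race with concurrent trimming of the buffer
        except IndexError:
            continue

# e.g. with four stored episodes the acceptance probabilities are 0.25, 0.5, 0.75, 1.0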
@@ -428,7 +437,7 @@ def __init__(self, args, net=None, remote=False):
         self.worker = WorkerServer(args) if remote else WorkerCluster(args)

         # thread connection
-        self.trainer = Trainer(args, self.model)
+        self.trainer = Trainer(args, copy.deepcopy(self.model))

     def model_path(self, model_id):
         return os.path.join('models', str(model_id) + '.pth')
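The Trainer now receives copy.deepcopy(self.model) instead of a shared reference. A minimal sketch of what a deep copy changes, independent of this codebase: updates applied to the copy leave the original parameters untouched.

import copy
import torch
import torch.nn as nn

model = nn.Linear(4, 2)                     # stand-in for the learner's model
trainer_model = copy.deepcopy(model)        # the trainer works on its own parameter tensors

with torch.no_grad():
    trainer_model.weight.add_(1.0)          # simulate a training update on the copy

print(torch.equal(model.weight, trainer_model.weight))   # False: the original is unchanged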
5 changes: 3 additions & 2 deletions scripts/win_rate_plot.py
@@ -87,8 +87,9 @@ def get_wp_list(path):
 for opponent in opponents:
     wp_list = averaged_wp_lists[opponent]
     start = start_epoch[opponent]
-    # ax.plot(clipped_epoch_list[start:], wp_list[start:], label=opponent)
-    ax.plot(clipped_game_list[start:], wp_list[start:], label=opponent)
+    end = min(min(len(clipped_epoch_list), len(clipped_game_list)), len(wp_list))
+    # ax.plot(clipped_epoch_list[start:end], wp_list[start:end], label=opponent)
+    ax.plot(clipped_game_list[start:end], wp_list[start:end], label=opponent)
     last_win_rate[opponent] = wp_list[-1]

 ax.set_xlabel('Games', size=14)
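The plotting fix trims both x-axis candidates and the win-rate series to a common length before calling ax.plot. A tiny illustrative sketch with made-up numbers:

games = [0, 1000, 2000, 3000]
win_rates = [0.41, 0.55, 0.63]                 # hypothetical: one entry shorter than the x axis
end = min(len(games), len(win_rates))
print(games[:end], win_rates[:end])            # equal-length sequences, safe to hand to ax.plot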
