Datasets:

repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1 value) |
---|---|---|---|---|---|---|
TiKick | TiKick-main/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import os
from setuptools import setup, find_packages
import setuptools
def get_version() -> str:
# https://packaging.python.org/guides/single-sourcing-package-version/
    with open(os.path.join("tmarl", "__init__.py"), "r") as f:
        init = f.read().split()
    return init[init.index("__version__") + 2][1:-1]
setup(
name="tmarl", # Replace with your own username
version=get_version(),
description="marl algorithms",
long_description=open("README.md", encoding="utf8").read(),
long_description_content_type="text/markdown",
author="tmarl",
author_email="tmarl_contact@tartrl.cn",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
],
keywords="multi-agent reinforcement learning algorithms pytorch",
python_requires='>=3.6',
)
| 1,788 | 35.510204 | 74 | py |
TiKick | TiKick-main/tmarl/__init__.py | __version__ = "0.0.3" | 21 | 21 | 21 | py |
TiKick | TiKick-main/tmarl/networks/policy_network.py |
import torch
import torch.nn as nn
from tmarl.networks.utils.util import init, check
from tmarl.networks.utils.mlp import MLPBase, MLPLayer
from tmarl.networks.utils.rnn import RNNLayer
from tmarl.networks.utils.act import ACTLayer
from tmarl.networks.utils.popart import PopArt
from tmarl.utils.util import get_shape_from_obs_space
# networks are defined here
class PolicyNetwork(nn.Module):
def __init__(self, args, obs_space, action_space, device=torch.device("cpu")):
super(PolicyNetwork, self).__init__()
self.hidden_size = args.hidden_size
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_policy_active_masks = args.use_policy_active_masks
self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
self._use_recurrent_policy = args.use_recurrent_policy
self._use_influence_policy = args.use_influence_policy
self._influence_layer_N = args.influence_layer_N
        self._use_policy_vhead = args.use_policy_vhead
        self._use_popart = getattr(args, "use_popart", False)  # read below when building the value head; defaults to a plain linear head
self._recurrent_N = args.recurrent_N
self.tpdv = dict(dtype=torch.float32, device=device)
obs_shape = get_shape_from_obs_space(obs_space)
self._mixed_obs = False
self.base = MLPBase(args, obs_shape, use_attn_internal=False, use_cat_self=True)
input_size = self.base.output_size
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
self.rnn = RNNLayer(input_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
input_size = self.hidden_size
if self._use_influence_policy:
self.mlp = MLPLayer(obs_shape[0], self.hidden_size,
self._influence_layer_N, self._use_orthogonal, self._activation_id)
input_size += self.hidden_size
self.act = ACTLayer(action_space, input_size, self._use_orthogonal, self._gain)
if self._use_policy_vhead:
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0))
if self._use_popart:
self.v_out = init_(PopArt(input_size, 1, device=device))
else:
self.v_out = init_(nn.Linear(input_size, 1))
self.to(device)
def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
actions, action_log_probs = self.act(actor_features, available_actions, deterministic)
return actions, action_log_probs, rnn_states
def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
action = check(action).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
if active_masks is not None:
active_masks = check(active_masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features, action, available_actions, active_masks = active_masks if self._use_policy_active_masks else None)
values = self.v_out(actor_features) if self._use_policy_vhead else None
return action_log_probs, dist_entropy, values
def get_policy_values(self, obs, rnn_states, masks):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
values = self.v_out(actor_features)
return values | 5,558 | 41.113636 | 181 | py |
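The row above holds the actor network used throughout the repo. For orientation, here is a minimal sketch (not part of the dataset) of driving `PolicyNetwork` for a single inference step. The `argparse.Namespace` fields are assumptions inferred from the attributes read in `__init__`, `MLPBase` and `RNNLayer`, and `get_shape_from_obs_space` (not included in this dump) is assumed to return the `Box` shape tuple.

```python
import argparse
import numpy as np
import torch
from gym import spaces
from tmarl.networks.policy_network import PolicyNetwork

# Hypothetical config: only the fields read by PolicyNetwork / MLPBase / RNNLayer.
args = argparse.Namespace(
    hidden_size=64, gain=0.01, use_orthogonal=True, activation_id=1,
    use_policy_active_masks=True, use_naive_recurrent_policy=False,
    use_recurrent_policy=True, use_influence_policy=False, influence_layer_N=1,
    use_policy_vhead=False, recurrent_N=1,
    use_feature_normalization=True, use_conv1d=False, stacked_frames=1, layer_N=1)

obs_space = spaces.Box(low=-1e6, high=1e6, shape=(268,), dtype=np.float64)  # football raw-obs size
act_space = spaces.Discrete(33)
net = PolicyNetwork(args, obs_space, act_space)

batch = 4  # e.g. n_rollout_threads * num_agents
obs = np.zeros((batch, 268), dtype=np.float32)
rnn_states = np.zeros((batch, args.recurrent_N, args.hidden_size), dtype=np.float32)
masks = np.ones((batch, 1), dtype=np.float32)  # 0 where an episode just ended

actions, action_log_probs, rnn_states = net(obs, rnn_states, masks, deterministic=True)
print(actions.shape)  # (4, 1) discrete action indices
```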
TiKick | TiKick-main/tmarl/networks/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/networks/utils/distributions.py | import torch
import torch.nn as nn
from .util import init
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
#
# Standardize distribution interfaces
#
# Categorical
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
# Normal
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum(-1, keepdim=True)
    def entropy(self):
        return super().entropy().sum(-1)
def mode(self):
return self.mean
# Bernoulli
class FixedBernoulli(torch.distributions.Bernoulli):
    def log_probs(self, actions):
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)
def entropy(self):
return super().entropy().sum(-1)
def mode(self):
return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(Categorical, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x, available_actions=None):
x = self.linear(x)
if available_actions is not None:
x[available_actions == 0] = -1e10
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(DiagGaussian, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self.logstd = AddBias(torch.zeros(num_outputs))
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size())
if x.is_cuda:
zeros = zeros.cuda()
action_logstd = self.logstd(zeros)
return FixedNormal(action_mean, action_logstd.exp())
class Bernoulli(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(Bernoulli, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedBernoulli(logits=x)
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
| 3,466 | 27.891667 | 86 | py |
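As a quick illustration of how these heads are used, here is a small sketch (not part of the dataset) of the `Categorical` head with unavailable-action masking. The 19-of-33 valid actions mirror the football wrapper further down and are an assumption of this example.

```python
import torch
from tmarl.networks.utils.distributions import Categorical

head = Categorical(num_inputs=64, num_outputs=33)
features = torch.randn(5, 64)             # a batch of actor features
avail = torch.ones(5, 33)
avail[:, 19:] = 0                         # actions 19..32 are never available

dist = head(features, available_actions=avail)  # masked logits -> FixedCategorical
sampled = dist.sample()                   # shape (5, 1); masked actions have zero probability
greedy = dist.mode()                      # shape (5, 1), argmax action per row
logp = dist.log_probs(sampled)            # shape (5, 1), summed log-probability
```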
TiKick | TiKick-main/tmarl/networks/utils/mlp.py |
import torch.nn as nn
from .util import init, get_clones
class MLPLayer(nn.Module):
def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, activation_id):
super(MLPLayer, self).__init__()
self._layer_N = layer_N
active_func = [nn.Tanh(), nn.ReLU(), nn.LeakyReLU(), nn.ELU()][activation_id]
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
gain = nn.init.calculate_gain(['tanh', 'relu', 'leaky_relu', 'leaky_relu'][activation_id])
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)
self.fc1 = nn.Sequential(
init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size))
self.fc_h = nn.Sequential(init_(
nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size))
self.fc2 = get_clones(self.fc_h, self._layer_N)
def forward(self, x):
x = self.fc1(x)
for i in range(self._layer_N):
x = self.fc2[i](x)
return x
class MLPBase(nn.Module):
def __init__(self, args, obs_shape, use_attn_internal=False, use_cat_self=True):
super(MLPBase, self).__init__()
self._use_feature_normalization = args.use_feature_normalization
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_conv1d = args.use_conv1d
self._stacked_frames = args.stacked_frames
self._layer_N = args.layer_N
self.hidden_size = args.hidden_size
obs_dim = obs_shape[0]
inputs_dim = obs_dim
if self._use_feature_normalization:
self.feature_norm = nn.LayerNorm(obs_dim)
self.mlp = MLPLayer(inputs_dim, self.hidden_size,
self._layer_N, self._use_orthogonal, self._activation_id)
def forward(self, x):
if self._use_feature_normalization:
x = self.feature_norm(x)
x = self.mlp(x)
return x
@property
def output_size(self):
return self.hidden_size | 2,116 | 32.603175 | 98 | py |
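A tiny sketch (not repo code, shapes illustrative) of using `MLPLayer` on its own: one input projection followed by `layer_N` cloned hidden blocks, each Linear -> activation -> LayerNorm.

```python
import torch
from tmarl.networks.utils.mlp import MLPLayer

# activation_id indexes [Tanh, ReLU, LeakyReLU, ELU]; 1 selects ReLU here
mlp = MLPLayer(input_dim=268, hidden_size=64, layer_N=2,
               use_orthogonal=True, activation_id=1)
out = mlp(torch.randn(8, 268))
print(out.shape)  # torch.Size([8, 64])
```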
TiKick | TiKick-main/tmarl/networks/utils/popart.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PopArt(torch.nn.Module):
def __init__(self, input_shape, output_shape, norm_axes=1, beta=0.99999, epsilon=1e-5, device=torch.device("cpu")):
super(PopArt, self).__init__()
self.beta = beta
self.epsilon = epsilon
self.norm_axes = norm_axes
self.tpdv = dict(dtype=torch.float32, device=device)
self.input_shape = input_shape
self.output_shape = output_shape
self.weight = nn.Parameter(torch.Tensor(output_shape, input_shape)).to(**self.tpdv)
self.bias = nn.Parameter(torch.Tensor(output_shape)).to(**self.tpdv)
self.stddev = nn.Parameter(torch.ones(output_shape), requires_grad=False).to(**self.tpdv)
self.mean = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
self.mean_sq = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
self.mean.zero_()
self.mean_sq.zero_()
self.debiasing_term.zero_()
def forward(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
return F.linear(input_vector, self.weight, self.bias)
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
        # snapshot the statistics used to rescale weight/bias before they are updated in place
        old_mean, old_stddev = self.mean.clone(), self.stddev.clone()
        batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
        batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
        self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
        self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
        self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))
        # write through .data so the registered tensors keep their identity instead of being rebound
        self.stddev.data = (self.mean_sq - self.mean ** 2).sqrt().clamp(min=1e-4)
        self.weight.data = self.weight * old_stddev / self.stddev
        self.bias.data = (old_stddev * self.bias + old_mean - self.mean) / self.stddev
def debiased_mean_var(self):
debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
return debiased_mean, debiased_var
def normalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
return out
def denormalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
out = out.cpu().numpy()
return out
| 3,796 | 38.968421 | 119 | py |
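A rough sketch (not repo code) of the intended PopArt cycle for value-target normalization: `update()` folds a batch of returns into the running, debiased statistics, and `normalize()`/`denormalize()` map targets into and out of that space. The shapes assume the default `norm_axes=1`, i.e. a flat batch of scalar returns.

```python
import numpy as np
from tmarl.networks.utils.popart import PopArt

popart = PopArt(input_shape=64, output_shape=1)
returns = (np.random.randn(256, 1) * 5.0 + 2.0).astype(np.float32)

popart.update(returns)                      # refresh running mean / mean_sq / debiasing term
norm_targets = popart.normalize(returns)    # roughly zero-mean, unit-variance targets
recovered = popart.denormalize(norm_targets)
print(np.abs(recovered - returns).max())    # ~0 up to float error
```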
TiKick | TiKick-main/tmarl/networks/utils/util.py |
import copy
import numpy as np
import torch
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def check(input):
output = torch.from_numpy(input) if type(input) == np.ndarray else input
return output
| 426 | 21.473684 | 76 | py |
TiKick | TiKick-main/tmarl/networks/utils/act.py |
from .distributions import Bernoulli, Categorical, DiagGaussian
import torch
import torch.nn as nn
class ACTLayer(nn.Module):
def __init__(self, action_space, inputs_dim, use_orthogonal, gain):
super(ACTLayer, self).__init__()
self.multidiscrete_action = False
self.continuous_action = False
self.mixed_action = False
if action_space.__class__.__name__ == "Discrete":
action_dim = action_space.n
self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "Box":
self.continuous_action = True
action_dim = action_space.shape[0]
self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "MultiBinary":
action_dim = action_space.shape[0]
self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "MultiDiscrete":
self.multidiscrete_action = True
action_dims = action_space.high - action_space.low + 1
self.action_outs = []
for action_dim in action_dims:
self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain))
self.action_outs = nn.ModuleList(self.action_outs)
        else:  # discrete + continuous
            self.mixed_action = True
            continuous_dim = action_space[0].shape[0]
            discrete_dim = action_space[1].n
            self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continuous_dim, use_orthogonal, gain), Categorical(
                inputs_dim, discrete_dim, use_orthogonal, gain)])
def forward(self, x, available_actions=None, deterministic=False):
if self.mixed_action :
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode() if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action.float())
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
elif self.multidiscrete_action:
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode() if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action)
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.cat(action_log_probs, -1)
elif self.continuous_action:
action_logits = self.action_out(x)
actions = action_logits.mode() if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
else:
action_logits = self.action_out(x, available_actions)
actions = action_logits.mode() if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
return actions, action_log_probs
def get_probs(self, x, available_actions=None):
if self.mixed_action or self.multidiscrete_action:
action_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action_prob = action_logit.probs
action_probs.append(action_prob)
action_probs = torch.cat(action_probs, -1)
elif self.continuous_action:
action_logits = self.action_out(x)
action_probs = action_logits.probs
else:
action_logits = self.action_out(x, available_actions)
action_probs = action_logits.probs
return action_probs
def get_log_1mp(self, x, action, available_actions=None, active_masks=None):
action_logits = self.action_out(x, available_actions)
action_prob = torch.gather(action_logits.probs, 1, action.long())
action_prob = torch.clamp(action_prob, 0, 1-1e-6)
action_log_1mp = torch.log(1 - action_prob)
return action_log_1mp
def evaluate_actions(self, x, action, available_actions=None, active_masks=None):
if self.mixed_action:
a, b = action.split((2, 1), -1)
b = b.long()
action = [a, b]
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
if len(action_logit.entropy().shape) == len(active_masks.shape):
dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum())
else:
dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
dist_entropy = dist_entropy[0] * 0.0025 + dist_entropy[1] * 0.01
elif self.multidiscrete_action:
action = torch.transpose(action, 0, 1)
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong
dist_entropy = torch.tensor(dist_entropy).mean()
elif self.continuous_action:
action_logits = self.action_out(x)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
                dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
else:
action_logits = self.action_out(x, available_actions)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
return action_log_probs, dist_entropy | 7,195 | 46.342105 | 121 | py |
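A quick sketch (not repo code) of `ACTLayer` with a `Discrete` action space, the case used by the football wrapper later in this dump: `forward()` samples (or argmaxes) an action, and `evaluate_actions()` re-scores given actions, returning the log-probabilities and mean entropy used in the policy loss.

```python
import torch
from gym import spaces
from tmarl.networks.utils.act import ACTLayer

act_layer = ACTLayer(spaces.Discrete(33), inputs_dim=64, use_orthogonal=True, gain=0.01)
x = torch.randn(6, 64)                                        # actor features

actions, log_probs = act_layer(x, deterministic=False)        # both shaped (6, 1)
log_probs2, entropy = act_layer.evaluate_actions(x, actions)  # re-score the same actions
print(torch.allclose(log_probs, log_probs2), entropy.shape)   # True, scalar entropy
```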
TiKick | TiKick-main/tmarl/networks/utils/rnn.py |
import torch
import torch.nn as nn
class RNNLayer(nn.Module):
def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal):
super(RNNLayer, self).__init__()
self._recurrent_N = recurrent_N
self._use_orthogonal = use_orthogonal
self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N)
for name, param in self.rnn.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
if self._use_orthogonal:
nn.init.orthogonal_(param)
else:
nn.init.xavier_uniform_(param)
self.norm = nn.LayerNorm(outputs_dim)
def forward(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x, hxs = self.rnn(x.unsqueeze(0), (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous())
x = x.squeeze(0)
hxs = hxs.transpose(0, 1)
else:
            # x is a (T, N, -1) tensor that has been flattened to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0)
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = hxs.transpose(0, 1)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous()
rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.reshape(T * N, -1)
hxs = hxs.transpose(0, 1)
x = self.norm(x)
return x, hxs
| 2,816 | 34.2125 | 132 | py |
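Below is a short sketch (not repo code) of the two call patterns `RNNLayer` supports: the one-step rollout call, where `x` and `hxs` share a batch dimension, and the training-time call, where `x` is a `(T, N)` sequence flattened to `(T*N, ...)` and zero entries in `masks` reset the hidden state at episode boundaries.

```python
import torch
from tmarl.networks.utils.rnn import RNNLayer

N, T, hidden, recurrent_N = 4, 5, 64, 1
rnn = RNNLayer(inputs_dim=hidden, outputs_dim=hidden, recurrent_N=recurrent_N,
               use_orthogonal=True)

# one-step call: x (N, hidden), hxs (N, recurrent_N, hidden), masks (N, 1)
x = torch.randn(N, hidden)
hxs = torch.zeros(N, recurrent_N, hidden)
masks = torch.ones(N, 1)
x_out, hxs_out = rnn(x, hxs, masks)

# sequence call: x flattened to (T * N, hidden); a zero mask at t=2 resets env 0
seq = torch.randn(T * N, hidden)
seq_masks = torch.ones(T * N, 1)
seq_masks[2 * N + 0] = 0.0
seq_out, hxs_final = rnn(seq, hxs, seq_masks)
print(seq_out.shape, hxs_final.shape)  # (20, 64) and (4, 1, 64)
```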
TiKick | TiKick-main/tmarl/drivers/__init__.py | | 0 | 0 | 0 | py |
TiKick | TiKick-main/tmarl/drivers/shared_distributed/base_driver.py | import numpy as np
import torch
def _t2n(x):
return x.detach().cpu().numpy()
class Driver(object):
def __init__(self, config, client=None):
self.all_args = config['all_args']
self.envs = config['envs']
self.eval_envs = config['eval_envs']
self.device = config['device']
self.num_agents = config['num_agents']
if 'signal' in config:
self.actor_id = config['signal'].actor_id
self.weight_ids = config['signal'].weight_ids
else:
self.actor_id = 0
self.weight_ids = [0]
# parameters
self.env_name = self.all_args.env_name
self.algorithm_name = self.all_args.algorithm_name
self.experiment_name = self.all_args.experiment_name
self.use_centralized_V = self.all_args.use_centralized_V
self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
self.num_env_steps = self.all_args.num_env_steps if hasattr(self.all_args,'num_env_steps') else self.all_args.eval_num
self.episode_length = self.all_args.episode_length
self.n_rollout_threads = self.all_args.n_rollout_threads
self.learner_n_rollout_threads = self.all_args.n_rollout_threads
self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
self.hidden_size = self.all_args.hidden_size
self.recurrent_N = self.all_args.recurrent_N
# interval
self.save_interval = self.all_args.save_interval
self.use_eval = self.all_args.use_eval
self.eval_interval = self.all_args.eval_interval
self.log_interval = self.all_args.log_interval
# dir
self.model_dir = self.all_args.model_dir
if self.algorithm_name == "rmappo":
from tmarl.algorithms.r_mappo_distributed.mappo_algorithm import MAPPOAlgorithm as TrainAlgo
from tmarl.algorithms.r_mappo_distributed.mappo_module import MAPPOModule as AlgoModule
else:
raise NotImplementedError
if self.envs:
share_observation_space = self.envs.share_observation_space[0] \
if self.use_centralized_V else self.envs.observation_space[0]
# policy network
self.algo_module = AlgoModule(self.all_args,
self.envs.observation_space[0],
share_observation_space,
self.envs.action_space[0],
device=self.device)
else:
share_observation_space = self.eval_envs.share_observation_space[0] \
if self.use_centralized_V else self.eval_envs.observation_space[0]
# policy network
self.algo_module = AlgoModule(self.all_args,
self.eval_envs.observation_space[0],
share_observation_space,
self.eval_envs.action_space[0],
device=self.device)
if self.model_dir is not None:
self.restore()
# algorithm
self.trainer = TrainAlgo(self.all_args, self.algo_module, device=self.device)
# buffer
from tmarl.replay_buffers.normal.shared_buffer import SharedReplayBuffer
self.buffer = SharedReplayBuffer(self.all_args,
self.num_agents,
self.envs.observation_space[0] if self.envs else self.eval_envs.observation_space[0],
share_observation_space,
self.envs.action_space[0] if self.envs else self.eval_envs.action_space[0])
def run(self):
raise NotImplementedError
def warmup(self):
raise NotImplementedError
def collect(self, step):
raise NotImplementedError
def insert(self, data):
raise NotImplementedError
def restore(self):
policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor.pt', map_location=self.device)
self.algo_module.actor.load_state_dict(policy_actor_state_dict)
| 4,244 | 39.04717 | 126 | py |
TiKick | TiKick-main/tmarl/drivers/shared_distributed/football_driver.py | from tqdm import tqdm
import numpy as np
from tmarl.drivers.shared_distributed.base_driver import Driver
def _t2n(x):
return x.detach().cpu().numpy()
class FootballDriver(Driver):
def __init__(self, config):
super(FootballDriver, self).__init__(config)
def run(self):
self.trainer.prep_rollout()
episodes = int(self.num_env_steps)
total_num_steps = 0
for episode in range(episodes):
print('Episode {}:'.format(episode))
self.eval(total_num_steps)
def eval(self, total_num_steps):
eval_episode_rewards = []
eval_obs, eval_share_obs, eval_available_actions = self.eval_envs.reset()
agent_num = eval_obs.shape[1]
used_buffer = self.buffer
rnn_shape = [self.n_eval_rollout_threads, agent_num, *used_buffer.rnn_states_critic.shape[3:]]
eval_rnn_states = np.zeros(rnn_shape, dtype=np.float32)
eval_rnn_states_critic = np.zeros(rnn_shape, dtype=np.float32)
eval_masks = np.ones((self.n_eval_rollout_threads, agent_num, 1), dtype=np.float32)
finished = None
for eval_step in tqdm(range(3001)):
self.trainer.prep_rollout()
_, eval_action, eval_action_log_prob, eval_rnn_states, _ = \
self.trainer.algo_module.get_actions(np.concatenate(eval_share_obs),
np.concatenate(eval_obs),
np.concatenate(eval_rnn_states),
None,
np.concatenate(eval_masks),
np.concatenate(eval_available_actions),
deterministic=True)
eval_actions = np.array(
np.split(_t2n(eval_action), self.n_eval_rollout_threads))
eval_rnn_states = np.array(
np.split(_t2n(eval_rnn_states), self.n_eval_rollout_threads))
if self.eval_envs.action_space[0].__class__.__name__ == 'Discrete':
eval_actions_env = np.squeeze(
np.eye(self.eval_envs.action_space[0].n)[eval_actions], 2)
else:
raise NotImplementedError
            # Observe reward and next obs
eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, eval_available_actions = \
self.eval_envs.step(eval_actions_env)
eval_rewards = eval_rewards.reshape([-1, agent_num]) # [roll_out, num_agents]
if finished is None:
eval_r = eval_rewards[:,:self.num_agents]
eval_episode_rewards.append(eval_r)
finished = eval_dones.copy()
else:
eval_r = (eval_rewards * ~finished)[:,:self.num_agents]
eval_episode_rewards.append(eval_r)
finished = eval_dones.copy() | finished
eval_masks = np.ones(
(self.n_eval_rollout_threads, agent_num, 1), dtype=np.float32)
eval_masks[eval_dones == True] = np.zeros(
((eval_dones == True).sum(), 1), dtype=np.float32)
eval_rnn_states[eval_dones == True] = np.zeros(
((eval_dones == True).sum(), self.recurrent_N, self.hidden_size), dtype=np.float32)
            if finished.all():
break
eval_episode_rewards = np.array(eval_episode_rewards) # [step,rollout,num_agents]
ally_goal = np.sum((eval_episode_rewards == 1), axis=0)
enemy_goal = np.sum((eval_episode_rewards == -1), axis=0)
net_goal = np.sum(eval_episode_rewards, axis=0)
winning_rate = np.mean(net_goal, axis=-1)
eval_env_infos = {}
eval_env_infos['eval_average_winning_rate'] = winning_rate>0
eval_env_infos['eval_average_losing_rate'] = winning_rate<0
eval_env_infos['eval_average_draw_rate'] = winning_rate==0
eval_env_infos['eval_average_ally_score'] = ally_goal
eval_env_infos['eval_average_enemy_score'] = enemy_goal
eval_env_infos['eval_average_net_score'] = net_goal
print("\tSuccess Rate: " + str(np.mean(winning_rate>0)) )
| 4,315 | 42.16 | 102 | py |
TiKick | TiKick-main/tmarl/envs/env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
from tmarl.utils.util import tile_images
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class ShareVecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, share_observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.share_observation_space = share_observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if np.all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'get_max_step':
remote.send((env.max_steps))
elif cmd == 'get_action': # for behavior cloning
action = env.get_action()
remote.send((action))
else:
raise NotImplementedError
class SubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def get_max_step(self):
for remote in self.remotes:
remote.send(('get_max_step', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def shareworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob, s_ob, available_actions = env.reset()
else:
if np.all(done):
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'render_vulnerability':
fr = env.render_vulnerability(data)
remote.send((fr))
elif cmd == 'get_action': # for behavior cloning
action = env.get_action()
remote.send((action))
else:
raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
    def get_action(self):  # for behavior cloning
for remote in self.remotes:
remote.send(('get_action', None))
results = [remote.recv() for remote in self.remotes]
return np.concatenate(results)
# single env
class DummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i] = self.envs[i].reset()
self.actions = None
return obs, rews, dones, infos
def reset(self):
obs = [env.reset() for env in self.envs]
return np.array(obs)
def get_max_step(self):
return [env.max_steps for env in self.envs]
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human", playeridx=None):
if mode == "rgb_array":
            if playeridx is None:
return np.array([env.render(mode=mode) for env in self.envs])
else:
return np.array([env.render(mode=mode,playeridx=playeridx) for env in self.envs])
elif mode == "human":
for env in self.envs:
                if playeridx is None:
env.render(mode=mode)
else:
env.render(mode=mode, playeridx=playeridx)
else:
raise NotImplementedError
class ShareDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self):
results = [env.reset() for env in self.envs]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
def save_replay(self):
for env in self.envs:
env.save_replay()
def get_action(self): # for behavior cloning
        results = [env.get_action() for env in self.envs]
return results
| 15,351 | 32.373913 | 118 | py |
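As a usage sketch (not repo code), the vectorized wrappers above are built from zero-argument factories so each worker can construct its own environment. `make_env`, `make_eval_envs` and the `all_args` fields used here are assumptions modelled on the football wrapper that appears later in this dump.

```python
from tmarl.envs.env_wrappers import ShareDummyVecEnv, ShareSubprocVecEnv

def make_env(all_args, rank, log_dir=None, is_eval=True):
    def _init():
        from tmarl.envs.football.football import RllibGFootball
        env = RllibGFootball(all_args, rank, log_dir=log_dir, isEval=is_eval)
        env.seed(rank)
        return env
    return _init

def make_eval_envs(all_args, log_dir=None):
    fns = [make_env(all_args, rank, log_dir) for rank in range(all_args.n_eval_rollout_threads)]
    # ShareDummyVecEnv runs in-process (easy to debug); ShareSubprocVecEnv forks one worker per env
    if all_args.n_eval_rollout_threads == 1:
        return ShareDummyVecEnv(fns)
    return ShareSubprocVecEnv(fns)
```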
TiKick | TiKick-main/tmarl/envs/__init__.py | | 0 | 0 | 0 | py |
TiKick | TiKick-main/tmarl/envs/football/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/football.py | import numpy as np
import gym
from ray.rllib.env.multi_agent_env import MultiAgentEnv
import tmarl.envs.football.env as football_env
class RllibGFootball(MultiAgentEnv):
"""An example of a wrapper for GFootball to make it compatible with rllib."""
def __init__(self, all_args, rank, log_dir=None, isEval=False):
self.num_agents = all_args.num_agents
self.num_rollout = all_args.n_rollout_threads
self.isEval = isEval
self.rank = rank
# create env
# need_render = (rank == 0) and isEval
need_render = (rank == 0)
# and (not isEval or self.use_behavior_cloning)
self.env = football_env.create_environment(
env_name=all_args.scenario_name, stacked=False,
logdir=log_dir,
representation=all_args.representation,
rewards='scoring' if isEval else all_args.rewards,
write_goal_dumps=False,
write_full_episode_dumps=need_render,
render=need_render,
dump_frequency=1 if need_render else 0,
number_of_left_players_agent_controls=self.num_agents,
number_of_right_players_agent_controls=0,
other_config_options={'action_set':'full'})
# state
self.last_loffside = np.zeros(11)
self.last_roffside = np.zeros(11)
# dimension
self.action_size = 33
if all_args.scenario_name == "11_vs_11_kaggle":
self.avail_size = 20
else:
self.avail_size = 19
if all_args.representation == 'raw':
obs_space_dim = 268
obs_space_low = np.zeros(obs_space_dim) - 1e6
obs_space_high = np.zeros(obs_space_dim) + 1e6
obs_space_type = 'float64'
else:
raise NotImplementedError
self.action_space = [gym.spaces.Discrete(
self.action_size) for _ in range(self.num_agents)]
self.observation_space = [gym.spaces.Box(
low=obs_space_low,
high=obs_space_high,
dtype=obs_space_type) for _ in range(self.num_agents)]
self.share_observation_space = [gym.spaces.Box(
low=obs_space_low,
high=obs_space_high,
dtype=obs_space_type) for _ in range(self.num_agents)]
def reset(self):
# available actions
avail_actions = np.ones([self.num_agents, self.action_size])
avail_actions[:, self.avail_size:] = 0
# state
self.last_loffside = np.zeros(11)
self.last_roffside = np.zeros(11)
# obs
raw_obs = self.env.reset()
raw_obs = self._notFullGame(raw_obs)
obs = self.raw2vec(raw_obs)
share_obs = obs.copy()
return obs, share_obs, avail_actions
def step(self, actions):
# step
actions = np.argmax(actions, axis=-1)
raw_o, r, d, info = self.env.step(actions.astype('int32'))
raw_o = self._notFullGame(raw_o)
obs = self.raw2vec(raw_o)
share_obs = obs.copy()
# available actions
avail_actions = np.ones([self.num_agents, self.action_size])
avail_actions[:, self.avail_size:] = 0
# translate to specific form
rewards = []
infos, dones = [], []
for i in range(self.num_agents):
infos.append(info)
dones.append(d)
reward = r[i] if self.num_agents > 1 else r
reward = -0.01 if d and reward < 1 and not self.isEval else reward
rewards.append(reward)
rewards = np.expand_dims(np.array(rewards), axis=1)
return obs, share_obs, rewards, dones, infos, avail_actions
def seed(self, seed=None):
if seed is None:
np.random.seed(1)
else:
np.random.seed(seed)
def close(self):
self.env.close()
def raw2vec(self, raw_obs):
obs = []
ally = np.array(raw_obs[0]['left_team'])
ally_d = np.array(raw_obs[0]['left_team_direction'])
enemy = np.array(raw_obs[0]['right_team'])
enemy_d = np.array(raw_obs[0]['right_team_direction'])
lo, ro = self.get_offside(raw_obs[0])
for a in range(self.num_agents):
            # preprocess
me = ally[int(raw_obs[a]['active'])]
ball = raw_obs[a]['ball'][:2]
ball_dist = np.linalg.norm(me - ball)
enemy_dist = np.linalg.norm(me - enemy, axis=-1)
to_enemy = enemy - me
to_ally = ally - me
to_ball = ball - me
o = []
# shape = 0
o.extend(ally.flatten())
o.extend(ally_d.flatten())
o.extend(enemy.flatten())
o.extend(enemy_d.flatten())
# shape = 88
o.extend(raw_obs[a]['ball'])
o.extend(raw_obs[a]['ball_direction'])
# shape = 94
if raw_obs[a]['ball_owned_team'] == -1:
o.extend([1, 0, 0])
if raw_obs[a]['ball_owned_team'] == 0:
o.extend([0, 1, 0])
if raw_obs[a]['ball_owned_team'] == 1:
o.extend([0, 0, 1])
# shape = 97
active = [0] * 11
active[raw_obs[a]['active']] = 1
o.extend(active)
# shape = 108
game_mode = [0] * 7
game_mode[raw_obs[a]['game_mode']] = 1
o.extend(game_mode)
# shape = 115
o.extend(raw_obs[a]['sticky_actions'][:10])
# shape = 125)
ball_dist = 1 if ball_dist > 1 else ball_dist
o.extend([ball_dist])
# shape = 126)
o.extend(raw_obs[a]['left_team_tired_factor'])
# shape = 137)
o.extend(raw_obs[a]['left_team_yellow_card'])
# shape = 148)
o.extend(raw_obs[a]['left_team_active']) # red cards
# shape = 159)
o.extend(lo) # !
# shape = 170)
o.extend(ro) # !
# shape = 181)
o.extend(enemy_dist)
# shape = 192)
to_ally[:, 0] /= 2
o.extend(to_ally.flatten())
# shape = 214)
to_enemy[:, 0] /= 2
o.extend(to_enemy.flatten())
# shape = 236)
to_ball[0] /= 2
o.extend(to_ball.flatten())
# shape = 238)
steps_left = raw_obs[a]['steps_left']
o.extend([1.0 * steps_left / 3001]) # steps left till end
if steps_left > 1500:
steps_left -= 1501 # steps left till halfend
steps_left = 1.0 * min(steps_left, 300.0) # clip
steps_left /= 300.0
o.extend([steps_left])
score_ratio = 1.0 * \
(raw_obs[a]['score'][0] - raw_obs[a]['score'][1])
score_ratio /= 5.0
score_ratio = min(score_ratio, 1.0)
score_ratio = max(-1.0, score_ratio)
o.extend([score_ratio])
# shape = 241
o.extend([0.0] * 27)
# shape = 268
obs.append(o)
return np.array(obs)
def get_offside(self, obs):
ball = np.array(obs['ball'][:2])
ally = np.array(obs['left_team'])
enemy = np.array(obs['right_team'])
if obs['game_mode'] != 0:
self.last_loffside = np.zeros(11, np.float32)
self.last_roffside = np.zeros(11, np.float32)
return np.zeros(11, np.float32), np.zeros(11, np.float32)
need_recalc = False
effective_ownball_team = -1
effective_ownball_player = -1
if obs['ball_owned_team'] > -1:
effective_ownball_team = obs['ball_owned_team']
effective_ownball_player = obs['ball_owned_player']
need_recalc = True
else:
ally_dist = np.linalg.norm(ball - ally, axis=-1)
enemy_dist = np.linalg.norm(ball - enemy, axis=-1)
if np.min(ally_dist) < np.min(enemy_dist):
if np.min(ally_dist) < 0.017:
need_recalc = True
effective_ownball_team = 0
effective_ownball_player = np.argmin(ally_dist)
elif np.min(enemy_dist) < np.min(ally_dist):
if np.min(enemy_dist) < 0.017:
need_recalc = True
effective_ownball_team = 1
effective_ownball_player = np.argmin(enemy_dist)
if not need_recalc:
return self.last_loffside, self.last_roffside
left_offside = np.zeros(11, np.float32)
right_offside = np.zeros(11, np.float32)
if effective_ownball_team == 0:
right_xs = [obs['right_team'][k][0] for k in range(1, 11)]
right_xs = np.array(right_xs)
right_xs.sort()
for k in range(1, 11):
if obs['left_team'][k][0] > right_xs[-1] and k != effective_ownball_player \
and obs['left_team'][k][0] > 0.0:
left_offside[k] = 1.0
else:
left_xs = [obs['left_team'][k][0] for k in range(1, 11)]
left_xs = np.array(left_xs)
left_xs.sort()
for k in range(1, 11):
if obs['right_team'][k][0] < left_xs[0] and k != effective_ownball_player \
and obs['right_team'][k][0] < 0.0:
right_offside[k] = 1.0
self.last_loffside = left_offside
self.last_roffside = right_offside
return left_offside, right_offside
def _notFullGame(self, raw_obs):
        # use this function when there are fewer than 11 players in the scenario
left_ok = len(raw_obs[0]['left_team']) == 11
right_ok = len(raw_obs[0]['right_team']) == 11
if left_ok and right_ok:
return raw_obs
# set player's coordinate at (-1,0), set player's velocity as (0,0)
for obs in raw_obs:
obs['left_team'] = np.array(obs['left_team'])
obs['right_team'] = np.array(obs['right_team'])
obs['left_team_direction'] = np.array(obs['left_team_direction'])
obs['right_team_direction'] = np.array(obs['right_team_direction'])
while len(obs['left_team']) < 11:
obs['left_team'] = np.concatenate([obs['left_team'], np.array([[-1,0]])], axis=0)
obs['left_team_direction'] = np.concatenate([obs['left_team_direction'], np.zeros([1,2])], axis=0)
obs['left_team_tired_factor'] = np.concatenate([obs['left_team_tired_factor'], np.zeros(1)], axis=0)
obs['left_team_yellow_card'] = np.concatenate([obs['left_team_yellow_card'], np.zeros(1)], axis=0)
obs['left_team_active'] = np.concatenate([obs['left_team_active'], np.ones(1)], axis=0)
while len(obs['right_team']) < 11:
obs['right_team'] = np.concatenate([obs['right_team'], np.array([[-1,0]])], axis=0)
obs['right_team_direction'] = np.concatenate([obs['right_team_direction'], np.zeros([1,2])], axis=0)
return raw_obs | 11,164 | 38.038462 | 116 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/11_vs_11_kaggle.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 3000
builder.config().second_half = 1500
builder.config().right_team_difficulty = 1.0
builder.config().left_team_difficulty = 1.0
builder.config().deterministic = False
if builder.EpisodeNumber() % 2 == 0:
first_team = Team.e_Left
second_team = Team.e_Right
else:
first_team = Team.e_Right
second_team = Team.e_Left
builder.SetTeam(first_team)
builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
builder.SetTeam(second_team)
builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(-0.050000, 0.000000, e_PlayerRole_RM)
builder.AddPlayer(-0.010000, 0.216102, e_PlayerRole_CF)
builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
| 2,396 | 39.627119 | 77 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/11_vs_11_lazy.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 3000
builder.config().second_half = 1500
builder.config().right_team_difficulty = 1.0
builder.config().left_team_difficulty = 1.0
builder.config().deterministic = False
if builder.EpisodeNumber() % 2 == 0:
first_team = Team.e_Left
second_team = Team.e_Right
else:
first_team = Team.e_Right
second_team = Team.e_Left
builder.SetTeam(first_team)
builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
builder.SetTeam(second_team)
builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(-0.050000, 0.000000, e_PlayerRole_RM, lazy=True)
builder.AddPlayer(-0.010000, 0.216102, e_PlayerRole_CF, lazy=True)
builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB, lazy=True)
builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB, lazy=True)
builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB, lazy=True)
builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB, lazy=True)
builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM, lazy=True)
builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM, lazy=True)
builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM, lazy=True)
builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM, lazy=True)
| 2,506 | 41.491525 | 77 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_3_vs_1_with_keeper.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.62, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.6, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.7, 0.2, e_PlayerRole_CM)
builder.AddPlayer(0.7, -0.2, e_PlayerRole_CM)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(-0.75, 0.0, e_PlayerRole_CB)
| 1,324 | 31.317073 | 74 | py |
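This 3-vs-1 drill leaves three left-team attackers controllable, which is the multi-agent setting this repository targets. A hedged sketch of opening it through the create_environment helper defined later in tmarl/envs/football/env/__init__.py (the representation and the idle actions are illustrative choices):
# Sketch: control all three attackers in academy_3_vs_1_with_keeper.
from tmarl.envs.football.env import create_environment

env = create_environment(
    env_name='academy_3_vs_1_with_keeper',
    representation='extracted',
    number_of_left_players_agent_controls=3)
obs = env.reset()                              # one SMM observation per controlled player
obs, reward, done, info = env.step([0, 0, 0])  # one action id per controlled player
env.close()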
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_empty_goal.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.02, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.0, 0.0, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(1.0, 0.0, e_PlayerRole_GK)
| 1,179 | 30.052632 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_run_to_score_with_keeper.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.02, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.0, 0.0, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(0.12, 0.2, e_PlayerRole_LB)
builder.AddPlayer(0.12, 0.1, e_PlayerRole_CB)
builder.AddPlayer(0.12, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.12, -0.1, e_PlayerRole_CB)
builder.AddPlayer(0.12, -0.2, e_PlayerRole_RB)
| 1,422 | 32.093023 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_counterattack_hard.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.26, -0.11)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(-0.672, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.75, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.75, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.672, 0.19576, e_PlayerRole_RB)
builder.AddPlayer(-0.434, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.434, 0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.5, -0.3161, e_PlayerRole_CM)
builder.AddPlayer(0.25, -0.1, e_PlayerRole_LM)
builder.AddPlayer(0.25, 0.1, e_PlayerRole_RM)
builder.AddPlayer(0.35, 0.316102, e_PlayerRole_CF)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(0.128, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.4, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.4, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(0.128, -0.19576, e_PlayerRole_RB)
builder.AddPlayer(0.365, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.282, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.365, 0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.54, -0.3161, e_PlayerRole_LM)
builder.AddPlayer(0.51, 0.0, e_PlayerRole_RM)
builder.AddPlayer(0.54, 0.316102, e_PlayerRole_CF)
| 2,186 | 36.706897 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_run_to_score.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.02, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.0, 0.0, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(0.12, 0.2, e_PlayerRole_LB)
builder.AddPlayer(0.12, 0.1, e_PlayerRole_CB)
builder.AddPlayer(0.12, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.12, -0.1, e_PlayerRole_CB)
builder.AddPlayer(0.12, -0.2, e_PlayerRole_RB)
| 1,421 | 32.069767 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_empty_goal_close.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.77, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.75, 0.0, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(1.0, 0.0, e_PlayerRole_GK)
| 1,180 | 30.078947 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_corner.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = False
builder.SetBallPosition(0.99, 0.41)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(1.0, 0.42, e_PlayerRole_LB)
builder.AddPlayer(0.7, 0.15, e_PlayerRole_CB)
builder.AddPlayer(0.7, 0.05, e_PlayerRole_CB)
builder.AddPlayer(0.7, -0.05, e_PlayerRole_RB)
builder.AddPlayer(0.0, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.6, 0.35, e_PlayerRole_CM)
builder.AddPlayer(0.8, 0.07, e_PlayerRole_CM)
builder.AddPlayer(0.8, -0.03, e_PlayerRole_LM)
builder.AddPlayer(0.8, -0.13, e_PlayerRole_RM)
builder.AddPlayer(0.7, -0.3, e_PlayerRole_CF)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(-0.75, -0.18, e_PlayerRole_LB)
builder.AddPlayer(-0.75, -0.08, e_PlayerRole_CB)
builder.AddPlayer(-0.75, 0.02, e_PlayerRole_CB)
builder.AddPlayer(-1.0, -0.1, e_PlayerRole_RB)
builder.AddPlayer(-0.8, -0.25, e_PlayerRole_CM)
builder.AddPlayer(-0.88, -0.07, e_PlayerRole_CM)
builder.AddPlayer(-0.88, 0.03, e_PlayerRole_CM)
builder.AddPlayer(-0.88, 0.13, e_PlayerRole_LM)
builder.AddPlayer(-0.75, 0.25, e_PlayerRole_RM)
builder.AddPlayer(-0.2, 0.0, e_PlayerRole_CF)
| 2,118 | 35.534483 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/__init__.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gfootball_engine as libgame
e_PlayerRole_GK = libgame.e_PlayerRole.e_PlayerRole_GK
e_PlayerRole_CB = libgame.e_PlayerRole.e_PlayerRole_CB
e_PlayerRole_LB = libgame.e_PlayerRole.e_PlayerRole_LB
e_PlayerRole_RB = libgame.e_PlayerRole.e_PlayerRole_RB
e_PlayerRole_DM = libgame.e_PlayerRole.e_PlayerRole_DM
e_PlayerRole_CM = libgame.e_PlayerRole.e_PlayerRole_CM
e_PlayerRole_LM = libgame.e_PlayerRole.e_PlayerRole_LM
e_PlayerRole_RM = libgame.e_PlayerRole.e_PlayerRole_RM
e_PlayerRole_AM = libgame.e_PlayerRole.e_PlayerRole_AM
e_PlayerRole_CF = libgame.e_PlayerRole.e_PlayerRole_CF
Team = libgame.e_Team
| 1,198 | 38.966667 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_pass_and_shoot_with_keeper.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.7, -0.28)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.7, 0.0, e_PlayerRole_CB)
builder.AddPlayer(0.7, -0.3, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(-0.75, 0.3, e_PlayerRole_CB)
| 1,278 | 30.975 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_run_pass_and_shoot_with_keeper.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.7, -0.28)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.7, 0.0, e_PlayerRole_CB)
builder.AddPlayer(0.7, -0.3, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(-0.75, 0.1, e_PlayerRole_CB)
| 1,278 | 30.975 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_counterattack_easy.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.26, -0.11)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(-0.672, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.75, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.75, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.672, 0.19576, e_PlayerRole_RB)
builder.AddPlayer(-0.434, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.434, 0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.5, -0.3161, e_PlayerRole_CM)
builder.AddPlayer(0.25, -0.1, e_PlayerRole_LM)
builder.AddPlayer(0.25, 0.1, e_PlayerRole_RM)
builder.AddPlayer(0.35, 0.316102, e_PlayerRole_CF)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(0.128, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(0.4, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.4, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(0.128, -0.19576, e_PlayerRole_RB)
builder.AddPlayer(0.365, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.282, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.365, 0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.54, -0.3161, e_PlayerRole_LM)
builder.AddPlayer(0.51, 0.0, e_PlayerRole_RM)
builder.AddPlayer(0.54, 0.316102, e_PlayerRole_CF)
| 2,185 | 36.689655 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/env/football_env_core.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Football environment as close as possible to a GYM environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import copy
try:
import gfootball_engine as libgame
from gfootball_engine import GameState
except ImportError:
print('Cannot import gfootball_engine. Package was not installed properly.')
from tmarl.envs.football.env import config as cfg
from gfootball.env import constants
from gfootball.env import football_action_set
from gfootball.env import observation_processor
import numpy as np
import six.moves.cPickle
from six.moves import range
import timeit
_unused_engines = []
_unused_rendering_engine = None
_active_rendering = False
try:
import cv2
except ImportError:
import cv2
class EnvState(object):
def __init__(self):
self.previous_score_diff = 0
self.previous_game_mode = -1
self.prev_ball_owned_team = -1
class FootballEnvCore(object):
def __init__(self, config):
global _unused_engines
self._config = config
self._sticky_actions = football_action_set.get_sticky_actions(config)
self._use_rendering_engine = False
if _unused_engines:
self._env = _unused_engines.pop()
else:
self._env = self._get_new_env()
# Reset is needed here to make sure render() API call before reset() API
# call works fine (get/setState makes sure env. config is the same).
self.reset(inc=0)
def _get_new_env(self):
env = libgame.GameEnv()
env.game_config.physics_steps_per_frame = self._config[
'physics_steps_per_frame']
env.game_config.render_resolution_x = self._config['render_resolution_x']
env.game_config.render_resolution_y = self._config['render_resolution_y']
return env
def _reset(self, animations, inc):
global _unused_engines
global _unused_rendering_engine
assert (self._env.state == GameState.game_created or
self._env.state == GameState.game_running or
self._env.state == GameState.game_done)
# Variables that are part of the set_state/get_state snapshot.
self._state = EnvState()
# Variables being re-computed upon set_state call, no need to snapshot.
self._observation = None
# Not snapshoted variables.
self._steps_time = 0
self._step = 0
self._config.NewScenario(inc=inc)
if self._env.state == GameState.game_created:
self._env.start_game()
self._env.state = GameState.game_running
scenario_config = self._config.ScenarioConfig()
assert (
not scenario_config.dynamic_player_selection or
not scenario_config.control_all_players
), ('For this scenario you need to control either 0 or all players on the '
'team ({} for left team, {} for right team).').format(
scenario_config.controllable_left_players,
scenario_config.controllable_right_players)
self._env.reset(scenario_config, animations)
def reset(self, inc=1):
"""Reset environment for a new episode using a given config."""
self._episode_start = timeit.default_timer()
self._action_set = football_action_set.get_action_set(self._config)
trace = observation_processor.ObservationProcessor(self._config)
self._cumulative_reward = 0
self._step_count = 0
self._trace = trace
self._reset(self._env.game_config.render, inc=inc)
while not self._retrieve_observation():
self._env.step()
return True
def _rendering_in_use(self):
global _active_rendering
if not self._use_rendering_engine:
assert not _active_rendering, ('Environment does not support multiple '
'rendering instances in the same process.')
_active_rendering = True
self._use_rendering_engine = True
self._env.game_config.render = True
def _release_engine(self):
global _unused_engines
global _unused_rendering_engine
global _active_rendering
if self._env:
if self._use_rendering_engine:
assert not _unused_rendering_engine
_unused_rendering_engine = self._env
_active_rendering = False
else:
_unused_engines.append(self._env)
self._env = None
def close(self):
self._release_engine()
if self._trace:
del self._trace
self._trace = None
def __del__(self):
self.close()
def step(self, action, extra_data={}):
assert self._env.state != GameState.game_done, (
        'Cannot call step() once episode finished (call reset() instead)')
assert self._env.state == GameState.game_running, (
'reset() must be called before step()')
action = [
football_action_set.named_action_from_action_set(self._action_set, a)
for a in action
]
self._step_count += 1
assert len(action) == (
self._env.config.left_agents + self._env.config.right_agents)
debug = {}
debug['action'] = action
action_index = 0
for left_team in [True, False]:
agents = self._env.config.left_agents if left_team else self._env.config.right_agents
for i in range(agents):
player_action = action[action_index]
# If agent 'holds' the game for too long, just start it.
if self._env.waiting_for_game_count == 20:
player_action = football_action_set.action_short_pass
elif self._env.waiting_for_game_count > 20:
player_action = football_action_set.action_idle
controlled_players = self._observation[
'left_agent_controlled_player'] if left_team else self._observation[
'right_agent_controlled_player']
if self._observation['ball_owned_team'] != -1 and self._observation[
'ball_owned_team'] ^ left_team and controlled_players[
i] == self._observation['ball_owned_player']:
if self._env.waiting_for_game_count < 30:
player_action = football_action_set.action_left
else:
player_action = football_action_set.action_right
action_index += 1
assert isinstance(player_action, football_action_set.CoreAction)
self._env.perform_action(player_action._backend_action, left_team, i)
while True:
enter_time = timeit.default_timer()
self._env.step()
self._steps_time += timeit.default_timer() - enter_time
if self._retrieve_observation():
break
if 'frame' in self._observation:
self._trace.add_frame(self._observation['frame'])
debug['frame_cnt'] = self._step
# Finish the episode on score.
if self._env.config.end_episode_on_score:
if self._observation['score'][0] > 0 or self._observation['score'][1] > 0:
self._env.state = GameState.game_done
# Finish the episode if the game is out of play (e.g. foul, corner etc)
if (self._env.config.end_episode_on_out_of_play and
self._observation['game_mode'] != int(
libgame.e_GameMode.e_GameMode_Normal) and
self._state.previous_game_mode == int(
libgame.e_GameMode.e_GameMode_Normal)):
self._env.state = GameState.game_done
self._state.previous_game_mode = self._observation['game_mode']
# End episode when team possessing the ball changes.
if (self._env.config.end_episode_on_possession_change and
self._observation['ball_owned_team'] != -1 and
self._state.prev_ball_owned_team != -1 and
self._observation['ball_owned_team'] !=
self._state.prev_ball_owned_team):
self._env.state = GameState.game_done
if self._observation['ball_owned_team'] != -1:
self._state.prev_ball_owned_team = self._observation['ball_owned_team']
# Compute reward.
score_diff = self._observation['score'][0] - self._observation['score'][1]
reward = score_diff - self._state.previous_score_diff
self._state.previous_score_diff = score_diff
if reward == 1:
self._trace.write_dump('score')
elif reward == -1:
self._trace.write_dump('lost_score')
debug['reward'] = reward
if self._observation['game_mode'] != int(
libgame.e_GameMode.e_GameMode_Normal):
self._env.waiting_for_game_count += 1
else:
self._env.waiting_for_game_count = 0
if self._step >= self._env.config.game_duration:
self._env.state = GameState.game_done
episode_done = self._env.state == GameState.game_done
debug['time'] = timeit.default_timer()
debug.update(extra_data)
self._cumulative_reward += reward
single_observation = copy.deepcopy(self._observation)
trace = {
'debug': debug,
'observation': single_observation,
'reward': reward,
'cumulative_reward': self._cumulative_reward
}
info = {}
self._trace.update(trace)
dumps = self._trace.process_pending_dumps(episode_done)
if dumps:
info['dumps'] = dumps
if episode_done:
del self._trace
self._trace = None
fps = self._step_count / (debug['time'] - self._episode_start)
game_fps = self._step_count / self._steps_time
logging.info(
'Episode reward: %.2f score: [%d, %d], steps: %d, '
'FPS: %.1f, gameFPS: %.1f', self._cumulative_reward,
single_observation['score'][0], single_observation['score'][1],
self._step_count, fps, game_fps)
if self._step_count == 1:
# Start writing episode_done
self.write_dump('episode_done')
return self._observation, reward, episode_done, info
def _retrieve_observation(self):
"""Constructs observations exposed by the environment.
Returns whether game
is on or not.
"""
info = self._env.get_info()
result = {}
if self._env.game_config.render:
frame = self._env.get_frame()
frame = np.frombuffer(frame, dtype=np.uint8)
frame = np.reshape(frame, [
self._config['render_resolution_x'],
self._config['render_resolution_y'], 3
])
frame = np.reshape(
np.concatenate([frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]]), [
3, self._config['render_resolution_y'],
self._config['render_resolution_x']
])
frame = np.transpose(frame, [1, 2, 0])
frame = np.flip(frame, 0)
result['frame'] = frame
result['ball'] = np.array(
[info.ball_position[0], info.ball_position[1], info.ball_position[2]])
# Ball's movement direction represented as [x, y] distance per step.
result['ball_direction'] = np.array([
info.ball_direction[0], info.ball_direction[1], info.ball_direction[2]
])
# Ball's rotation represented as [x, y, z] rotation angle per step.
result['ball_rotation'] = np.array(
[info.ball_rotation[0], info.ball_rotation[1], info.ball_rotation[2]])
self._convert_players_observation(info.left_team, 'left_team', result)
self._convert_players_observation(info.right_team, 'right_team', result)
result['left_agent_sticky_actions'] = []
result['left_agent_controlled_player'] = []
result['right_agent_sticky_actions'] = []
result['right_agent_controlled_player'] = []
for i in range(self._env.config.left_agents):
result['left_agent_controlled_player'].append(
info.left_controllers[i].controlled_player)
result['left_agent_sticky_actions'].append(
np.array(self.sticky_actions_state(True, i), dtype=np.uint8))
for i in range(self._env.config.right_agents):
result['right_agent_controlled_player'].append(
info.right_controllers[i].controlled_player)
result['right_agent_sticky_actions'].append(
np.array(self.sticky_actions_state(False, i), dtype=np.uint8))
result['game_mode'] = int(info.game_mode)
result['score'] = [info.left_goals, info.right_goals]
result['ball_owned_team'] = info.ball_owned_team
result['ball_owned_player'] = info.ball_owned_player
result['steps_left'] = self._env.config.game_duration - info.step
self._observation = result
self._step = info.step
return info.is_in_play
def _convert_players_observation(self, players, name, result):
"""Converts internal players representation to the public one.
Internal representation comes directly from gameplayfootball engine.
Public representation is part of environment observations.
Args:
players: collection of team players to convert.
name: name of the team being converted (left_team or right_team).
result: collection where conversion result is added.
"""
positions = []
directions = []
tired_factors = []
active = []
yellow_cards = []
roles = []
designated_player = -1
for id, player in enumerate(players):
positions.append(player.position[0])
positions.append(player.position[1])
directions.append(player.direction[0])
directions.append(player.direction[1])
tired_factors.append(player.tired_factor)
active.append(player.is_active)
yellow_cards.append(player.has_card)
roles.append(player.role)
if player.designated_player:
designated_player = id
result[name] = np.reshape(np.array(positions), [-1, 2])
# Players' movement direction represented as [x, y] distance per step.
result['{}_direction'.format(name)] = np.reshape(
np.array(directions), [-1, 2])
# Players' tired factor in the range [0, 1] (0 means not tired).
result['{}_tired_factor'.format(name)] = np.array(tired_factors)
result['{}_active'.format(name)] = np.array(active)
result['{}_yellow_card'.format(name)] = np.array(yellow_cards)
result['{}_roles'.format(name)] = np.array(roles)
result['{}_designated_player'.format(name)] = designated_player
def observation(self):
"""Returns the current observation of the game."""
assert (self._env.state == GameState.game_running or
self._env.state == GameState.game_done), (
'reset() must be called before observation()')
return copy.deepcopy(self._observation)
def sticky_actions_state(self, left_team, player_id):
result = []
for a in self._sticky_actions:
result.append(
self._env.sticky_action_state(a._backend_action, left_team,
player_id))
return np.uint8(result)
def get_state(self, to_pickle):
assert (self._env.state == GameState.game_running or
self._env.state == GameState.game_done), (
'reset() must be called before get_state()')
to_pickle['FootballEnvCore'] = self._state
pickle = six.moves.cPickle.dumps(to_pickle)
return self._env.get_state(pickle)
def set_state(self, state):
assert (self._env.state == GameState.game_running or
self._env.state == GameState.game_done), (
'reset() must be called before set_state()')
res = self._env.set_state(state)
assert self._retrieve_observation()
    from_pickle = six.moves.cPickle.loads(res)
    self._state = from_pickle['FootballEnvCore']
    if self._trace is None:
      self._trace = observation_processor.ObservationProcessor(self._config)
    return from_pickle
def tracker_setup(self, start, end):
self._env.tracker_setup(start, end)
def write_dump(self, name):
return self._trace.write_dump(name)
def render(self, mode):
global _unused_rendering_engine
if self._env.state == GameState.game_created:
self._rendering_in_use()
return False
if not self._env.game_config.render:
if not self._use_rendering_engine:
if self._env.state != GameState.game_created:
state = self.get_state({})
self._release_engine()
if _unused_rendering_engine:
self._env = _unused_rendering_engine
_unused_rendering_engine = None
else:
self._env = self._get_new_env()
self._rendering_in_use()
self._reset(animations=False, inc=0)
self.set_state(state)
# We call render twice, as the first call has bad camera position.
self._env.render(False)
else:
self._env.game_config.render = True
self._env.render(True)
self._retrieve_observation()
if mode == 'rgb_array':
frame = self._observation['frame']
b, g, r = cv2.split(frame)
return cv2.merge((r, g, b))
elif mode == 'human':
return True
return False
def disable_render(self):
self._env.game_config.render = False
| 17,067 | 37.269058 | 91 | py |
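The raw observation dictionary assembled by _retrieve_observation above can be inspected by driving FootballEnvCore directly. A small sketch, assuming the game engine is installed; the level choice is arbitrary:
# Sketch: build the core env and look at the raw observation dict.
from tmarl.envs.football.env import config as cfg_module
from tmarl.envs.football.env import football_env_core

cfg = cfg_module.Config({'level': 'academy_empty_goal_close',
                         'players': ['agent:left_players=1']})
core = football_env_core.FootballEnvCore(cfg)
core.reset()
obs = core.observation()
print(sorted(obs.keys()))  # ball, ball_direction, game_mode, score, steps_left, ...
print(obs['score'], obs['steps_left'], obs['ball_owned_team'])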
TiKick | TiKick-main/tmarl/envs/football/env/script_helpers.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of functions used by command line scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tmarl.envs.football.env import config
from gfootball.env import football_action_set
from tmarl.envs.football.env import football_env
from gfootball.env import observation_processor
import copy
import six.moves.cPickle
import os
import tempfile
class ScriptHelpers(object):
"""Set of methods used by command line scripts."""
def __init__(self):
pass
def __modify_trace(self, replay, fps):
"""Adopt replay to the new framerate and add additional steps at the end."""
trace = []
min_fps = replay[0]['debug']['config']['physics_steps_per_frame']
assert fps % min_fps == 0, (
        'Trace has to be rendered at a framerate that is a multiple of {}'.format(
            min_fps))
assert fps <= 100, ('Framerate of up to 100 is supported')
empty_steps = int(fps / min_fps) - 1
for f in replay:
trace.append(f)
idle_step = copy.deepcopy(f)
idle_step['debug']['action'] = [football_action_set.action_idle
] * len(f['debug']['action'])
for _ in range(empty_steps):
trace.append(idle_step)
# Add some empty steps at the end, so that we can record videos.
for _ in range(10):
trace.append(idle_step)
return trace
def __build_players(self, dump_file, spec):
players = []
for player in spec:
players.extend(['replay:path={},left_players=1'.format(
dump_file)] * config.count_left_players(player))
players.extend(['replay:path={},right_players=1'.format(
dump_file)] * config.count_right_players(player))
return players
def load_dump(self, dump_file):
dump = []
with open(dump_file, 'rb') as in_fd:
while True:
try:
step = six.moves.cPickle.load(in_fd)
except EOFError:
return dump
dump.append(step)
def dump_to_txt(self, dump_file, output, include_debug):
    # Write the (optionally debug-stripped) dump as plain text.
    dump = self.load_dump(dump_file)
    if not include_debug:
      for s in dump:
        if 'debug' in s:
          del s['debug']
    with open(output, 'w') as f:
      f.write(str(dump))
def dump_to_video(self, dump_file):
dump = self.load_dump(dump_file)
cfg = config.Config(dump[0]['debug']['config'])
cfg['dump_full_episodes'] = True
cfg['write_video'] = True
cfg['display_game_stats'] = True
processor = observation_processor.ObservationProcessor(cfg)
processor.write_dump('episode_done')
for frame in dump:
processor.update(frame)
def replay(self, dump, fps=10, config_update={}, directory=None, render=True):
replay = self.load_dump(dump)
trace = self.__modify_trace(replay, fps)
fd, temp_path = tempfile.mkstemp(suffix='.dump')
with open(temp_path, 'wb') as f:
for step in trace:
six.moves.cPickle.dump(step, f)
assert replay[0]['debug']['frame_cnt'] == 0, (
'Trace does not start from the beginning of the episode, can not replay')
cfg = config.Config(replay[0]['debug']['config'])
cfg['players'] = self.__build_players(temp_path, cfg['players'])
config_update['physics_steps_per_frame'] = int(100 / fps)
config_update['real_time'] = False
if directory:
config_update['tracesdir'] = directory
config_update['write_video'] = True
    # Local tweaks (kept commented out): optionally hide the on-screen game
    # stats and raise the video quality when writing replays.
# config_update['display_game_stats'] = False
# config_update['video_quality_level'] = 2
cfg.update(config_update)
env = football_env.FootballEnv(cfg)
if render:
env.render()
env.reset()
done = False
try:
while not done:
_, _, done, _ = env.step([])
except KeyboardInterrupt:
env.write_dump('shutdown')
exit(1)
os.close(fd)
| 4,449 | 32.712121 | 81 | py |
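ScriptHelpers ties dump loading, text export and replay together. A usage sketch; the dump file name below is hypothetical and stands for any trace written to the configured tracesdir:
# Sketch: inspect and replay a previously recorded trace.
from tmarl.envs.football.env.script_helpers import ScriptHelpers

helpers = ScriptHelpers()
steps = helpers.load_dump('episode_done_20211001-000000.dump')  # hypothetical path
print(len(steps), 'recorded steps')
helpers.dump_to_txt('episode_done_20211001-000000.dump', 'trace.txt', include_debug=False)
helpers.replay('episode_done_20211001-000000.dump', fps=10, render=False)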
TiKick | TiKick-main/tmarl/envs/football/env/scenario_builder.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class responsible for generating scenarios."""
import importlib
import os
import pkgutil
import random
import sys
from absl import flags
from absl import logging
import gfootball_engine as libgame
Player = libgame.FormationEntry
Role = libgame.e_PlayerRole
Team = libgame.e_Team
FLAGS = flags.FLAGS
def all_scenarios():
path = os.path.abspath(__file__)
path = os.path.join(os.path.dirname(os.path.dirname(path)), 'scenarios')
scenarios = []
for m in pkgutil.iter_modules([path]):
# There was API change in pkgutil between Python 3.5 and 3.6...
if m.__class__ == tuple:
scenarios.append(m[1])
else:
scenarios.append(m.name)
return scenarios
class Scenario(object):
def __init__(self, config):
# Game config controls C++ engine and is derived from the main config.
self._scenario_cfg = libgame.ScenarioConfig.make()
self._config = config
self._active_team = Team.e_Left
scenario = None
try:
scenario = importlib.import_module('tmarl.envs.football.scenarios.{}'.format(config['level']))
except ImportError as e:
logging.error('Loading scenario "%s" failed' % config['level'])
logging.error(e)
sys.exit(1)
scenario.build_scenario(self)
self.SetTeam(libgame.e_Team.e_Left)
self._FakePlayersForEmptyTeam(self._scenario_cfg.left_team)
self.SetTeam(libgame.e_Team.e_Right)
self._FakePlayersForEmptyTeam(self._scenario_cfg.right_team)
self._BuildScenarioConfig()
def _FakePlayersForEmptyTeam(self, team):
if len(team) == 0:
self.AddPlayer(-1.000000, 0.420000, libgame.e_PlayerRole.e_PlayerRole_GK, True)
def _BuildScenarioConfig(self):
"""Builds scenario config from gfootball.environment config."""
self._scenario_cfg.real_time = self._config['real_time']
self._scenario_cfg.left_agents = self._config.number_of_left_players()
self._scenario_cfg.right_agents = self._config.number_of_right_players()
# This is needed to record 'game_engine_random_seed' in the dump.
if 'game_engine_random_seed' not in self._config._values:
self._config.set_scenario_value('game_engine_random_seed',
random.randint(0, 2000000000))
if not self._scenario_cfg.deterministic:
self._scenario_cfg.game_engine_random_seed = (
self._config['game_engine_random_seed'])
if 'reverse_team_processing' not in self._config:
self._config['reverse_team_processing'] = (
bool(self._config['game_engine_random_seed'] % 2))
if 'reverse_team_processing' in self._config:
self._scenario_cfg.reverse_team_processing = (
self._config['reverse_team_processing'])
def config(self):
return self._scenario_cfg
def SetTeam(self, team):
self._active_team = team
def AddPlayer(self, x, y, role, lazy=False, controllable=True):
"""Build player for the current scenario.
Args:
x: x coordinate of the player in the range [-1, 1].
y: y coordinate of the player in the range [-0.42, 0.42].
role: Player's role in the game (goal keeper etc.).
lazy: Computer doesn't perform any automatic actions for lazy player.
controllable: Whether player can be controlled.
"""
player = Player(x, y, role, lazy, controllable)
if self._active_team == Team.e_Left:
self._scenario_cfg.left_team.append(player)
else:
self._scenario_cfg.right_team.append(player)
def SetBallPosition(self, ball_x, ball_y):
self._scenario_cfg.ball_position[0] = ball_x
self._scenario_cfg.ball_position[1] = ball_y
def EpisodeNumber(self):
return self._config['episode_number']
def ScenarioConfig(self):
return self._scenario_cfg
| 4,305 | 33.725806 | 100 | py |
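Scenario above expects every level module to expose a build_scenario(builder) function like the academy files earlier in this dump. A minimal sketch of a custom level written against that builder API; the module name and all coordinates are made up:
# Sketch of a custom level, e.g. tmarl/envs/football/scenarios/my_2_vs_1.py
from . import *

def build_scenario(builder):
  builder.config().game_duration = 400
  builder.config().deterministic = False
  builder.config().end_episode_on_score = True
  builder.config().end_episode_on_out_of_play = True
  builder.SetBallPosition(0.5, 0.0)
  builder.SetTeam(Team.e_Left)
  builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
  builder.AddPlayer(0.5, 0.0, e_PlayerRole_CF)
  builder.AddPlayer(0.5, -0.2, e_PlayerRole_CM)
  builder.SetTeam(Team.e_Right)
  builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
  builder.AddPlayer(-0.5, 0.0, e_PlayerRole_CB)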
TiKick | TiKick-main/tmarl/envs/football/env/config.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config loader."""
from __future__ import print_function
import copy
from absl import flags
import gfootball_engine as libgame
FLAGS = flags.FLAGS
def parse_player_definition(definition):
"""Parses player definition.
An example of player definition is: "agent:players=4" or "replay:path=...".
Args:
definition: a string defining a player
Returns:
A tuple (name, dict).
"""
name = definition
d = {'left_players': 0,
'right_players': 0}
if ':' in definition:
(name, params) = definition.split(':')
for param in params.split(','):
(key, value) = param.split('=')
d[key] = value
if d['left_players'] == 0 and d['right_players'] == 0:
d['left_players'] = 1
return name, d
def count_players(definition):
"""Returns a number of players given a definition."""
_, player_definition = parse_player_definition(definition)
return (int(player_definition['left_players']) +
int(player_definition['right_players']))
def count_left_players(definition):
"""Returns a number of left players given a definition."""
return int(parse_player_definition(definition)[1]['left_players'])
def count_right_players(definition):
"""Returns a number of players given a definition."""
return int(parse_player_definition(definition)[1]['right_players'])
def get_agent_number_of_players(players):
"""Returns a total number of players controlled by an agent."""
return sum([count_players(player) for player in players
if player.startswith('agent')])
class Config(object):
def __init__(self, values=None):
self._values = {
'action_set': 'default',
'custom_display_stats': None,
'display_game_stats': True,
'dump_full_episodes': False,
'dump_scores': False,
'players': ['agent:left_players=1'],
'level': '11_vs_11_stochastic',
'physics_steps_per_frame': 10,
'render_resolution_x': 1280,
'real_time': False,
'tracesdir': '/tmp/dumps',
'video_format': 'avi',
'video_quality_level': 0, # 0 - low, 1 - medium, 2 - high
'write_video': False
}
self._values['render_resolution_y'] = int(
0.5625 * self._values['render_resolution_x'])
if values:
self._values.update(values)
self.NewScenario()
def number_of_left_players(self):
return sum([count_left_players(player)
for player in self._values['players']])
def number_of_right_players(self):
return sum([count_right_players(player)
for player in self._values['players']])
def number_of_players_agent_controls(self):
return get_agent_number_of_players(self._values['players'])
def __eq__(self, other):
assert isinstance(other, self.__class__)
return self._values == other._values and self._scenario_values == other._scenario_values
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, key):
if key in self._scenario_values:
return self._scenario_values[key]
return self._values[key]
def __setitem__(self, key, value):
self._values[key] = value
def __contains__(self, key):
return key in self._scenario_values or key in self._values
def get_dictionary(self):
cfg = copy.deepcopy(self._values)
cfg.update(self._scenario_values)
return cfg
def set_scenario_value(self, key, value):
"""Override value of specific config key for a single episode."""
self._scenario_values[key] = value
def serialize(self):
return self._values
def update(self, config):
self._values.update(config)
def ScenarioConfig(self):
return self._scenario_cfg
def NewScenario(self, inc = 1):
if 'episode_number' not in self._values:
self._values['episode_number'] = 0
self._values['episode_number'] += inc
self._scenario_values = {}
from tmarl.envs.football.env import scenario_builder
self._scenario_cfg = scenario_builder.Scenario(self).ScenarioConfig()
| 4,590 | 28.811688 | 92 | py |
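parse_player_definition and Config determine how many players each side exposes to the agent. A small sketch of the parsing behaviour and of overriding defaults; the chosen level and values are arbitrary, and building a Config requires the game engine to be importable:
# Sketch: player-definition parsing and config overrides.
from tmarl.envs.football.env import config

name, counts = config.parse_player_definition('agent:left_players=2,right_players=0')
print(name, counts['left_players'], counts['right_players'])  # agent 2 0

c = config.Config({'level': 'academy_empty_goal',
                   'players': ['agent:left_players=1'],
                   'real_time': False})
print(c['level'], c.number_of_players_agent_controls())       # academy_empty_goal 1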
TiKick | TiKick-main/tmarl/envs/football/env/__init__.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GFootball Environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tmarl.envs.football.env import config
from gfootball.env import football_env
from gfootball.env import observation_preprocessing
from gfootball.env import wrappers
def _process_reward_wrappers(env, rewards):
assert 'scoring' in rewards.split(',')
if 'checkpoints' in rewards.split(','):
env = wrappers.CheckpointRewardWrapper(env)
return env
def _process_representation_wrappers(env, representation, channel_dimensions):
"""Wraps with necessary representation wrappers.
Args:
env: A GFootball gym environment.
representation: See create_environment.representation comment.
channel_dimensions: (width, height) tuple that represents the dimensions of
SMM or pixels representation.
Returns:
Google Research Football environment.
"""
if representation.startswith('pixels'):
env = wrappers.PixelsStateWrapper(env, 'gray' in representation,
channel_dimensions)
elif representation == 'simple115':
env = wrappers.Simple115StateWrapper(env)
elif representation == 'simple115v2':
env = wrappers.Simple115StateWrapper(env, True)
elif representation == 'extracted':
env = wrappers.SMMWrapper(env, channel_dimensions)
elif representation == 'raw':
pass
else:
raise ValueError('Unsupported representation: {}'.format(representation))
return env
def _apply_output_wrappers(env, rewards, representation, channel_dimensions,
apply_single_agent_wrappers, stacked):
"""Wraps with necessary wrappers modifying the output of the environment.
Args:
env: A GFootball gym environment.
rewards: What rewards to apply.
representation: See create_environment.representation comment.
channel_dimensions: (width, height) tuple that represents the dimensions of
SMM or pixels representation.
apply_single_agent_wrappers: Whether to reduce output to single agent case.
stacked: Should observations be stacked.
Returns:
Google Research Football environment.
"""
env = _process_reward_wrappers(env, rewards)
env = _process_representation_wrappers(env, representation,
channel_dimensions)
if apply_single_agent_wrappers:
if representation != 'raw':
env = wrappers.SingleAgentObservationWrapper(env)
env = wrappers.SingleAgentRewardWrapper(env)
if stacked:
env = wrappers.FrameStack(env, 4)
env = wrappers.GetStateWrapper(env)
return env
def create_environment(env_name='',
stacked=False,
representation='extracted',
rewards='scoring',
write_goal_dumps=False,
write_full_episode_dumps=False,
render=False,
write_video=False,
dump_frequency=1,
logdir='',
extra_players=None,
number_of_left_players_agent_controls=1,
number_of_right_players_agent_controls=0,
channel_dimensions=(
observation_preprocessing.SMM_WIDTH,
observation_preprocessing.SMM_HEIGHT),
other_config_options={}):
"""Creates a Google Research Football environment.
Args:
env_name: a name of a scenario to run, e.g. "11_vs_11_stochastic".
The list of scenarios can be found in directory "scenarios".
stacked: If True, stack 4 observations, otherwise, only the last
observation is returned by the environment.
Stacking is only possible when representation is one of the following:
"pixels", "pixels_gray" or "extracted".
In that case, the stacking is done along the last (i.e. channel)
dimension.
representation: String to define the representation used to build
the observation. It can be one of the following:
'pixels': the observation is the rendered view of the football field
downsampled to 'channel_dimensions'. The observation size is:
'channel_dimensions'x3 (or 'channel_dimensions'x12 when "stacked" is
True).
'pixels_gray': the observation is the rendered view of the football field
in gray scale and downsampled to 'channel_dimensions'. The observation
size is 'channel_dimensions'x1 (or 'channel_dimensions'x4 when stacked
is True).
'extracted': also referred to as super minimap. The observation is
composed of 4 planes of size 'channel_dimensions'.
Its size is then 'channel_dimensions'x4 (or 'channel_dimensions'x16 when
stacked is True).
The first plane P holds the position of players on the left
team, P[y,x] is 255 if there is a player at position (x,y), otherwise,
its value is 0.
The second plane holds in the same way the position of players
on the right team.
The third plane holds the position of the ball.
The last plane holds the active player.
'simple115'/'simple115v2': the observation is a vector of size 115.
It holds:
- the ball_position and the ball_direction as (x,y,z)
- one hot encoding of who controls the ball.
[1, 0, 0]: nobody, [0, 1, 0]: left team, [0, 0, 1]: right team.
- one hot encoding of size 11 to indicate who is the active player
in the left team.
- 11 (x,y) positions for each player of the left team.
- 11 (x,y) motion vectors for each player of the left team.
- 11 (x,y) positions for each player of the right team.
- 11 (x,y) motion vectors for each player of the right team.
- one hot encoding of the game mode. Vector of size 7 with the
following meaning:
{NormalMode, KickOffMode, GoalKickMode, FreeKickMode,
CornerMode, ThrowInMode, PenaltyMode}.
Can only be used when the scenario is a flavor of normal game
(i.e. 11 versus 11 players).
rewards: Comma separated list of rewards to be added.
Currently supported rewards are 'scoring' and 'checkpoints'.
write_goal_dumps: whether to dump traces up to 200 frames before goals.
write_full_episode_dumps: whether to dump traces for every episode.
render: whether to render game frames.
Must be enable when rendering videos or when using pixels
representation.
write_video: whether to dump videos when a trace is dumped.
dump_frequency: how often to write dumps/videos (in terms of # of episodes)
Sub-sample the episodes for which we dump videos to save some disk space.
logdir: directory holding the logs.
extra_players: A list of extra players to use in the environment.
Each player is defined by a string like:
"$player_name:left_players=?,right_players=?,$param1=?,$param2=?...."
number_of_left_players_agent_controls: Number of left players an agent
controls.
number_of_right_players_agent_controls: Number of right players an agent
controls.
channel_dimensions: (width, height) tuple that represents the dimensions of
SMM or pixels representation.
other_config_options: dict that allows directly setting other options in
the Config
Returns:
Google Research Football environment.
"""
assert env_name
scenario_config = config.Config({'level': env_name}).ScenarioConfig()
players = [('agent:left_players=%d,right_players=%d' % (
number_of_left_players_agent_controls,
number_of_right_players_agent_controls))]
# Enable MultiAgentToSingleAgent wrapper?
multiagent_to_singleagent = False
if scenario_config.control_all_players:
if (number_of_left_players_agent_controls in [0, 1] and
number_of_right_players_agent_controls in [0, 1]):
multiagent_to_singleagent = True
players = [('agent:left_players=%d,right_players=%d' %
(scenario_config.controllable_left_players
if number_of_left_players_agent_controls else 0,
scenario_config.controllable_right_players
if number_of_right_players_agent_controls else 0))]
if extra_players is not None:
players.extend(extra_players)
config_values = {
'dump_full_episodes': write_full_episode_dumps,
'dump_scores': write_goal_dumps,
'players': players,
'level': env_name,
'tracesdir': logdir,
'write_video': write_video,
}
config_values.update(other_config_options)
c = config.Config(config_values)
env = football_env.FootballEnv(c)
if multiagent_to_singleagent:
env = wrappers.MultiAgentToSingleAgent(
env, number_of_left_players_agent_controls,
number_of_right_players_agent_controls)
if dump_frequency > 1:
env = wrappers.PeriodicDumpWriter(env, dump_frequency, render)
elif render:
env.render()
env = _apply_output_wrappers(
env, rewards, representation, channel_dimensions,
(number_of_left_players_agent_controls +
number_of_right_players_agent_controls == 1), stacked)
return env
def create_remote_environment(
username,
token,
model_name='',
track='',
stacked=False,
representation='raw',
rewards='scoring',
channel_dimensions=(
observation_preprocessing.SMM_WIDTH,
observation_preprocessing.SMM_HEIGHT),
include_rendering=False):
"""Creates a remote Google Research Football environment.
Args:
username: User name.
token: User token.
model_name: A model identifier to be displayed on the leaderboard.
track: which competition track to connect to.
stacked: If True, stack 4 observations, otherwise, only the last
observation is returned by the environment.
Stacking is only possible when representation is one of the following:
"pixels", "pixels_gray" or "extracted".
In that case, the stacking is done along the last (i.e. channel)
dimension.
representation: See create_environment.representation comment.
rewards: Comma separated list of rewards to be added.
Currently supported rewards are 'scoring' and 'checkpoints'.
channel_dimensions: (width, height) tuple that represents the dimensions of
SMM or pixels representation.
include_rendering: Whether to return frame as part of the output.
Returns:
Google Research Football environment.
"""
from gfootball.env import remote_football_env
env = remote_football_env.RemoteFootballEnv(
username, token, model_name=model_name, track=track,
include_rendering=include_rendering)
env = _apply_output_wrappers(
env, rewards, representation, channel_dimensions,
env._config.number_of_players_agent_controls() == 1, stacked)
return env
| 11,541 | 41.748148 | 80 | py |
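create_environment above is the main entry point. A hedged example of running one full episode with random actions; the scenario, representation and reward choices are illustrative:
# Sketch: run one episode with random actions for a single controlled player.
from tmarl.envs.football.env import create_environment

env = create_environment(env_name='academy_pass_and_shoot_with_keeper',
                         representation='extracted',
                         stacked=True,
                         rewards='scoring,checkpoints',
                         number_of_left_players_agent_controls=1)
obs = env.reset()
done, episode_reward = False, 0.0
while not done:
  action = env.action_space.sample()
  obs, reward, done, info = env.step(action)
  episode_reward += reward
print('episode reward:', episode_reward)
env.close()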
TiKick | TiKick-main/tmarl/envs/football/env/football_env.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows different types of players to play against each other."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import importlib
from absl import logging
from tmarl.envs.football.env import config as cfg
from gfootball.env import constants
from gfootball.env import football_action_set
from tmarl.envs.football.env import football_env_core
from gfootball.env import observation_rotation
import gym
import numpy as np
class FootballEnv(gym.Env):
"""Allows multiple players to play in the same environment."""
def __init__(self, config):
self._config = config
player_config = {'index': 0}
# There can be at most one agent at a time. We need to remember its
# team and the index on the team to generate observations appropriately.
self._agent = None
self._agent_index = -1
self._agent_left_position = -1
self._agent_right_position = -1
self._players = self._construct_players(config['players'], player_config)
self._env = football_env_core.FootballEnvCore(self._config)
self._num_actions = len(football_action_set.get_action_set(self._config))
self._cached_observation = None
@property
def action_space(self):
if self._config.number_of_players_agent_controls() > 1:
return gym.spaces.MultiDiscrete(
[self._num_actions] * self._config.number_of_players_agent_controls())
return gym.spaces.Discrete(self._num_actions)
def _construct_players(self, definitions, config):
result = []
left_position = 0
right_position = 0
for definition in definitions:
(name, d) = cfg.parse_player_definition(definition)
config_name = 'player_{}'.format(name)
if config_name in config:
config[config_name] += 1
else:
config[config_name] = 0
try:
player_factory = importlib.import_module(
'gfootball.env.players.{}'.format(name))
except ImportError as e:
logging.error('Failed loading player "%s"', name)
logging.error(e)
exit(1)
player_config = copy.deepcopy(config)
player_config.update(d)
player = player_factory.Player(player_config, self._config)
if name == 'agent':
assert not self._agent, 'Only one \'agent\' player allowed'
self._agent = player
self._agent_index = len(result)
self._agent_left_position = left_position
self._agent_right_position = right_position
result.append(player)
left_position += player.num_controlled_left_players()
right_position += player.num_controlled_right_players()
config['index'] += 1
return result
def _convert_observations(self, original, player,
left_player_position, right_player_position):
"""Converts generic observations returned by the environment to
the player specific observations.
Args:
original: original observations from the environment.
player: player for which to generate observations.
left_player_position: index into observation corresponding to the left
player.
right_player_position: index into observation corresponding to the right
player.
"""
observations = []
for is_left in [True, False]:
adopted = original if is_left or player.can_play_right(
) else observation_rotation.flip_observation(original, self._config)
prefix = 'left' if is_left or not player.can_play_right() else 'right'
position = left_player_position if is_left else right_player_position
for x in range(player.num_controlled_left_players() if is_left
else player.num_controlled_right_players()):
o = {}
for v in constants.EXPOSED_OBSERVATIONS:
o[v] = copy.deepcopy(adopted[v])
assert (len(adopted[prefix + '_agent_controlled_player']) == len(
adopted[prefix + '_agent_sticky_actions']))
o['designated'] = adopted[prefix + '_team_designated_player']
if position + x >= len(adopted[prefix + '_agent_controlled_player']):
o['active'] = -1
o['sticky_actions'] = []
else:
o['active'] = (
adopted[prefix + '_agent_controlled_player'][position + x])
o['sticky_actions'] = np.array(copy.deepcopy(
adopted[prefix + '_agent_sticky_actions'][position + x]))
# There is no frame for players on the right ATM.
if is_left and 'frame' in original:
o['frame'] = original['frame']
observations.append(o)
return observations
def _action_to_list(self, a):
if isinstance(a, np.ndarray):
return a.tolist()
if not isinstance(a, list):
return [a]
return a
def _get_actions(self):
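    # Query every player for its actions and concatenate them as left-team actions followed by
    # right-team actions, flipping actions from players that cannot natively play on the right.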
obs = self._env.observation()
left_actions = []
right_actions = []
left_player_position = 0
right_player_position = 0
for player in self._players:
adopted_obs = self._convert_observations(obs, player,
left_player_position,
right_player_position)
left_player_position += player.num_controlled_left_players()
right_player_position += player.num_controlled_right_players()
a = self._action_to_list(player.take_action(adopted_obs))
assert len(adopted_obs) == len(
a), 'Player provided {} actions instead of {}.'.format(
len(a), len(adopted_obs))
if not player.can_play_right():
for x in range(player.num_controlled_right_players()):
index = x + player.num_controlled_left_players()
a[index] = observation_rotation.flip_single_action(
a[index], self._config)
left_actions.extend(a[:player.num_controlled_left_players()])
right_actions.extend(a[player.num_controlled_left_players():])
actions = left_actions + right_actions
return actions
def step(self, action):
action = self._action_to_list(action)
if self._agent:
self._agent.set_action(action)
else:
assert len(
action
) == 0, 'step() received {} actions, but no agent is playing.'.format(
len(action))
_, reward, done, info = self._env.step(self._get_actions())
score_reward = reward
if self._agent:
reward = ([reward] * self._agent.num_controlled_left_players() +
[-reward] * self._agent.num_controlled_right_players())
self._cached_observation = None
info['score_reward'] = score_reward
return (self.observation(), np.array(reward, dtype=np.float32), done, info)
def reset(self):
self._env.reset()
for player in self._players:
player.reset()
self._cached_observation = None
return self.observation()
def observation(self):
if not self._cached_observation:
self._cached_observation = self._env.observation()
if self._agent:
self._cached_observation = self._convert_observations(
self._cached_observation, self._agent,
self._agent_left_position, self._agent_right_position)
return self._cached_observation
def write_dump(self, name):
return self._env.write_dump(name)
def close(self):
self._env.close()
def get_state(self, to_pickle={}):
return self._env.get_state(to_pickle)
def set_state(self, state):
self._cached_observation = None
return self._env.set_state(state)
def tracker_setup(self, start, end):
self._env.tracker_setup(start, end)
def render(self, mode='human'):
self._cached_observation = None
return self._env.render(mode=mode)
def disable_render(self):
self._cached_observation = None
return self._env.disable_render()
| 8,348 | 36.272321 | 80 | py |
TiKick | TiKick-main/tmarl/algorithms/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_algorithm.py | import torch
from tmarl.utils.valuenorm import ValueNorm
# implement the loss of the MAPPO here
class MAPPOAlgorithm():
def __init__(self,
args,
init_module,
device=torch.device("cpu")):
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
self.algo_module = init_module
self.clip_param = args.clip_param
self.ppo_epoch = args.ppo_epoch
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.policy_value_loss_coef = args.policy_value_loss_coef
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self._use_recurrent_policy = args.use_recurrent_policy
self._use_naive_recurrent = args.use_naive_recurrent_policy
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
self._use_policy_vhead = args.use_policy_vhead
assert (self._use_popart and self._use_valuenorm) == False, ("self._use_popart and self._use_valuenorm can not be set True simultaneously")
if self._use_popart:
self.value_normalizer = self.algo_module.critic.v_out
if self._use_policy_vhead:
self.policy_value_normalizer = self.algo_module.actor.v_out
elif self._use_valuenorm:
self.value_normalizer = ValueNorm(1, device = self.device)
if self._use_policy_vhead:
self.policy_value_normalizer = ValueNorm(1, device = self.device)
else:
self.value_normalizer = None
if self._use_policy_vhead:
self.policy_value_normalizer = None
def prep_rollout(self):
self.algo_module.actor.eval()
| 2,234 | 38.210526 | 147 | py |
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_module.py | import torch
from tmarl.networks.policy_network import PolicyNetwork
class MAPPOModule:
def __init__(self, args, obs_space, share_obs_space, act_space, device=torch.device("cpu")):
self.device = device
self.lr = args.lr
self.critic_lr = args.critic_lr
self.opti_eps = args.opti_eps
self.weight_decay = args.weight_decay
self.obs_space = obs_space
self.share_obs_space = share_obs_space
self.act_space = act_space
self.actor = PolicyNetwork(args, self.obs_space, self.act_space, self.device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay)
def get_actions(self, share_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False):
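        # Only the actor network is queried at rollout time; critic-related outputs are returned as None.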
actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)
return None, actions, action_log_probs, rnn_states_actor, None | 1,050 | 41.04 | 135 | py |
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/loggers/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import time
def timer(function):
"""
    Decorator that times a function.
    :param function: the function to be timed
    :return: the wrapped function, which prints the elapsed running time
"""
def wrapper(*args, **kwargs):
time_start = time.time()
res = function(*args, **kwargs)
cost_time = time.time() - time_start
print("{} running time: {}s".format(function.__name__, cost_time))
return res
return wrapper | 1,011 | 27.914286 | 74 | py |
TiKick | TiKick-main/tmarl/loggers/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/loggers/TSee/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/replay_buffers/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/replay_buffers/normal/shared_buffer.py | import torch
import numpy as np
from collections import defaultdict
from tmarl.utils.util import check,get_shape_from_obs_space, get_shape_from_act_space
def _flatten(T, N, x):
return x.reshape(T * N, *x.shape[2:])
def _cast(x):
return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])
class SharedReplayBuffer(object):
def __init__(self, args, num_agents, obs_space, share_obs_space, act_space):
self.episode_length = args.episode_length
self.n_rollout_threads = args.n_rollout_threads
self.hidden_size = args.hidden_size
self.recurrent_N = args.recurrent_N
self.gamma = args.gamma
self.gae_lambda = args.gae_lambda
self._use_gae = args.use_gae
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_proper_time_limits = args.use_proper_time_limits
self._mixed_obs = False # for mixed observation
obs_shape = get_shape_from_obs_space(obs_space)
share_obs_shape = get_shape_from_obs_space(share_obs_space)
# for mixed observation
if 'Dict' in obs_shape.__class__.__name__:
self._mixed_obs = True
self.obs = {}
self.share_obs = {}
for key in obs_shape:
self.obs[key] = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape[key].shape), dtype=np.float32)
for key in share_obs_shape:
self.share_obs[key] = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape[key].shape), dtype=np.float32)
else:
# deal with special attn format
if type(obs_shape[-1]) == list:
obs_shape = obs_shape[:1]
if type(share_obs_shape[-1]) == list:
share_obs_shape = share_obs_shape[:1]
self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape), dtype=np.float32)
self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)
self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
self.rnn_states_critic = np.zeros_like(self.rnn_states)
self.value_preds = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.returns = np.zeros_like(self.value_preds)
if act_space.__class__.__name__ == 'Discrete':
self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n), dtype=np.float32)
else:
self.available_actions = None
act_shape = get_shape_from_act_space(act_space)
self.actions = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
self.action_log_probs = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
self.rewards = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.bad_masks = np.ones_like(self.masks)
self.active_masks = np.ones_like(self.masks)
self.step = 0
def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
if self._mixed_obs:
for key in self.share_obs.keys():
self.share_obs[key][self.step + 1] = share_obs[key].copy()
for key in self.obs.keys():
self.obs[key][self.step + 1] = obs[key].copy()
else:
self.share_obs[self.step + 1] = share_obs.copy()
self.obs[self.step + 1] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step + 1] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step + 1] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def init_buffer(self,share_obs,obs):
self.share_obs[0] = share_obs
self.obs[0] = obs
def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
self.share_obs[self.step] = share_obs.copy()
self.obs[self.step] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def after_update(self):
if self._mixed_obs:
for key in self.share_obs.keys():
self.share_obs[key][0] = self.share_obs[key][-1].copy()
for key in self.obs.keys():
self.obs[key][0] = self.obs[key][-1].copy()
else:
self.share_obs[0] = self.share_obs[-1].copy()
self.obs[0] = self.obs[-1].copy()
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
self.active_masks[0] = self.active_masks[-1].copy()
if self.available_actions is not None:
self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
def compute_returns(self, next_value, value_normalizer=None):
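        # Compute per-step return targets by iterating backwards over the rollout; uses GAE when
        # enabled and de-normalizes value predictions through PopArt/ValueNorm when one is active.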
if self._use_proper_time_limits:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
# step + 1
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * gae * self.masks[step + 1]
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step])
else:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * self.value_preds[step]
else:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
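        # Flatten the (T, N, M, ...) rollout into a single batch and yield shuffled mini-batches
        # for feed-forward (non-recurrent) PPO updates.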
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) * number of agents ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(n_rollout_threads, episode_length, num_agents, n_rollout_threads * episode_length * num_agents,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
rand = torch.randperm(batch_size).numpy()
sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
share_obs[key] = self.share_obs[key][:-1].reshape(-1, *self.share_obs[key].shape[3:])
for key in self.obs.keys():
obs[key] = self.obs[key][:-1].reshape(-1, *self.obs[key].shape[3:])
else:
share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])
rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])
value_preds = self.value_preds[:-1].reshape(-1, 1)
returns = self.returns[:-1].reshape(-1, 1)
masks = self.masks[:-1].reshape(-1, 1)
active_masks = self.active_masks[:-1].reshape(-1, 1)
action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, 1)
for indices in sampler:
# obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]
if self._mixed_obs:
share_obs_batch = {}
obs_batch = {}
for key in share_obs.keys():
share_obs_batch[key] = share_obs[key][indices]
for key in obs.keys():
obs_batch[key] = obs[key][indices]
else:
share_obs_batch = share_obs[indices]
obs_batch = obs[indices]
rnn_states_batch = rnn_states[indices]
rnn_states_critic_batch = rnn_states_critic[indices]
actions_batch = actions[indices]
if self.available_actions is not None:
available_actions_batch = available_actions[indices]
else:
available_actions_batch = None
value_preds_batch = value_preds[indices]
return_batch = returns[indices]
masks_batch = masks[indices]
active_masks_batch = active_masks[indices]
old_action_log_probs_batch = action_log_probs[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages[indices]
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def naive_recurrent_generator(self, advantages, num_mini_batch):
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads*num_agents
assert n_rollout_threads*num_agents >= num_mini_batch, (
"PPO requires the number of processes ({})* number of agents ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(n_rollout_threads, num_agents, num_mini_batch))
num_envs_per_batch = batch_size // num_mini_batch
perm = torch.randperm(batch_size).numpy()
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
share_obs[key] = self.share_obs[key].reshape(-1, batch_size, *self.share_obs[key].shape[3:])
for key in self.obs.keys():
obs[key] = self.obs[key].reshape(-1, batch_size, *self.obs[key].shape[3:])
else:
share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:])
obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])
rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1])
value_preds = self.value_preds.reshape(-1, batch_size, 1)
returns = self.returns.reshape(-1, batch_size, 1)
masks = self.masks.reshape(-1, batch_size, 1)
active_masks = self.active_masks.reshape(-1, batch_size, 1)
action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, batch_size, 1)
for start_ind in range(0, batch_size, num_envs_per_batch):
if self._mixed_obs:
share_obs_batch = defaultdict(list)
obs_batch = defaultdict(list)
else:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
if self._mixed_obs:
for key in share_obs.keys():
share_obs_batch[key].append(share_obs[key][:-1, ind])
for key in obs.keys():
obs_batch[key].append(obs[key][:-1, ind])
else:
share_obs_batch.append(share_obs[:-1, ind])
obs_batch.append(obs[:-1, ind])
rnn_states_batch.append(rnn_states[0:1, ind])
rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])
actions_batch.append(actions[:, ind])
if self.available_actions is not None:
available_actions_batch.append(available_actions[:-1, ind])
value_preds_batch.append(value_preds[:-1, ind])
return_batch.append(returns[:-1, ind])
masks_batch.append(masks[:-1, ind])
active_masks_batch.append(active_masks[:-1, ind])
old_action_log_probs_batch.append(action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
# [N[T, dim]]
T, N = self.episode_length, num_envs_per_batch
# These are all from_numpys of size (T, N, -1)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = np.stack(share_obs_batch[key], 1)
for key in obs_batch.keys():
obs_batch[key] = np.stack(obs_batch[key], 1)
else:
share_obs_batch = np.stack(share_obs_batch, 1)
obs_batch = np.stack(obs_batch, 1)
actions_batch = np.stack(actions_batch, 1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, 1)
value_preds_batch = np.stack(value_preds_batch, 1)
return_batch = np.stack(return_batch, 1)
masks_batch = np.stack(masks_batch, 1)
active_masks_batch = np.stack(active_masks_batch, 1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)
adv_targ = np.stack(adv_targ, 1)
# States is just a (N, dim) from_numpy [N[1,dim]]
rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
# Flatten the (T, N, ...) from_numpys to (T * N, ...)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = _flatten(T, N, share_obs_batch[key])
for key in obs_batch.keys():
obs_batch[key] = _flatten(T, N, obs_batch[key])
else:
share_obs_batch = _flatten(T, N, share_obs_batch)
obs_batch = _flatten(T, N, obs_batch)
actions_batch = _flatten(T, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(T, N, available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(T, N, value_preds_batch)
return_batch = _flatten(T, N, return_batch)
masks_batch = _flatten(T, N, masks_batch)
active_masks_batch = _flatten(T, N, active_masks_batch)
old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)
adv_targ = _flatten(T, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):
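        # Split the flattened rollout into chunks of length data_chunk_length and yield shuffled
        # mini-batches of chunks, keeping only the RNN state at the start of each chunk.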
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
data_chunks = batch_size // data_chunk_length # [C=r*T*M/L]
mini_batch_size = data_chunks // num_mini_batch
assert n_rollout_threads * episode_length * num_agents >= data_chunk_length, (
"PPO requires the number of processes ({})* number of agents ({}) * episode length ({}) "
"to be greater than or equal to the number of "
"data chunk length ({}).".format(n_rollout_threads, num_agents, episode_length ,data_chunk_length))
rand = torch.randperm(data_chunks).numpy()
sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
if len(self.share_obs[key].shape) == 6:
share_obs[key] = self.share_obs[key][:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs[key].shape[3:])
elif len(self.share_obs[key].shape) == 5:
share_obs[key] = self.share_obs[key][:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.share_obs[key].shape[3:])
else:
share_obs[key] = _cast(self.share_obs[key][:-1])
for key in self.obs.keys():
if len(self.obs[key].shape) == 6:
obs[key] = self.obs[key][:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs[key].shape[3:])
elif len(self.obs[key].shape) == 5:
obs[key] = self.obs[key][:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.obs[key].shape[3:])
else:
obs[key] = _cast(self.obs[key][:-1])
else:
if len(self.share_obs.shape) > 4:
share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:])
else:
share_obs = _cast(self.share_obs[:-1])
obs = _cast(self.obs[:-1])
actions = _cast(self.actions)
action_log_probs = _cast(self.action_log_probs)
advantages = _cast(advantages)
value_preds = _cast(self.value_preds[:-1])
returns = _cast(self.returns[:-1])
masks = _cast(self.masks[:-1])
active_masks = _cast(self.active_masks[:-1])
# rnn_states = _cast(self.rnn_states[:-1])
# rnn_states_critic = _cast(self.rnn_states_critic[:-1])
rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states_critic.shape[3:])
if self.available_actions is not None:
available_actions = _cast(self.available_actions[:-1])
for indices in sampler:
if self._mixed_obs:
share_obs_batch = defaultdict(list)
obs_batch = defaultdict(list)
else:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for index in indices:
ind = index * data_chunk_length
# size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]
if self._mixed_obs:
for key in share_obs.keys():
share_obs_batch[key].append(share_obs[key][ind:ind+data_chunk_length])
for key in obs.keys():
obs_batch[key].append(obs[key][ind:ind+data_chunk_length])
else:
share_obs_batch.append(share_obs[ind:ind+data_chunk_length])
obs_batch.append(obs[ind:ind+data_chunk_length])
actions_batch.append(actions[ind:ind+data_chunk_length])
if self.available_actions is not None:
available_actions_batch.append(available_actions[ind:ind+data_chunk_length])
value_preds_batch.append(value_preds[ind:ind+data_chunk_length])
return_batch.append(returns[ind:ind+data_chunk_length])
masks_batch.append(masks[ind:ind+data_chunk_length])
active_masks_batch.append(active_masks[ind:ind+data_chunk_length])
old_action_log_probs_batch.append(action_log_probs[ind:ind+data_chunk_length])
adv_targ.append(advantages[ind:ind+data_chunk_length])
# size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]
rnn_states_batch.append(rnn_states[ind])
rnn_states_critic_batch.append(rnn_states_critic[ind])
L, N = data_chunk_length, mini_batch_size
# These are all from_numpys of size (L, N, Dim)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = np.stack(share_obs_batch[key], axis=1)
for key in obs_batch.keys():
obs_batch[key] = np.stack(obs_batch[key], axis=1)
else:
share_obs_batch = np.stack(share_obs_batch, axis=1)
obs_batch = np.stack(obs_batch, axis=1)
actions_batch = np.stack(actions_batch, axis=1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, axis=1)
value_preds_batch = np.stack(value_preds_batch, axis=1)
return_batch = np.stack(return_batch, axis=1)
masks_batch = np.stack(masks_batch, axis=1)
active_masks_batch = np.stack(active_masks_batch, axis=1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1)
adv_targ = np.stack(adv_targ, axis=1)
# States is just a (N, -1) from_numpy
rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
# Flatten the (L, N, ...) from_numpys to (L * N, ...)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = _flatten(L, N, share_obs_batch[key])
for key in obs_batch.keys():
obs_batch[key] = _flatten(L, N, obs_batch[key])
else:
share_obs_batch = _flatten(L, N, share_obs_batch)
obs_batch = _flatten(L, N, obs_batch)
actions_batch = _flatten(L, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(L, N, available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(L, N, value_preds_batch)
return_batch = _flatten(L, N, return_batch)
masks_batch = _flatten(L, N, masks_batch)
active_masks_batch = _flatten(L, N, active_masks_batch)
old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)
adv_targ = _flatten(L, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
| 28,769 | 52.081181 | 231 | py |
TiKick | TiKick-main/tmarl/replay_buffers/normal/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/configs/config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import argparse
def get_config():
parser = argparse.ArgumentParser(
description='TiKick', formatter_class=argparse.RawDescriptionHelpFormatter)
# prepare parameters
parser.add_argument("--algorithm_name", type=str,
default='rmappo', choices=["rmappo"])
parser.add_argument("--experiment_name", type=str, default="check",
help="an identifier to distinguish different experiment.")
parser.add_argument("--seed", type=int, default=1,
help="Random seed for numpy/torch")
parser.add_argument("--disable_cuda", action='store_true', default=False,
help="by default False, will use GPU to train; or else will use CPU;")
parser.add_argument("--cuda_deterministic",
action='store_false', default=True,
help="by default, make sure random seed effective. if set, bypass such function.")
parser.add_argument("--n_rollout_threads", type=int, default=2,
help="Number of parallel envs for training rollout")
parser.add_argument("--n_eval_rollout_threads", type=int, default=1,
help="Number of parallel envs for evaluating rollout")
parser.add_argument("--n_render_rollout_threads", type=int, default=1,
help="Number of parallel envs for rendering rollout")
parser.add_argument("--eval_num", type=int, default=1,
help='Number of environment steps to evaluate (default: 1)')
# env parameters
parser.add_argument("--env_name", type=str, default='StarCraft2',
help="specify the name of environment")
parser.add_argument("--use_obs_instead_of_state", action='store_true',
default=False, help="Whether to use global state or concatenated obs")
# replay buffer parameters
parser.add_argument("--episode_length", type=int,
default=200, help="Max length for any episode")
# network parameters
parser.add_argument("--separate_policy", action='store_true',
                        default=False, help='Whether the agent uses a separate policy')
parser.add_argument("--use_centralized_V", action='store_false',
default=True, help="Whether to use centralized V function")
parser.add_argument("--use_conv1d", action='store_true',
default=False, help="Whether to use conv1d")
parser.add_argument("--stacked_frames", type=int, default=1,
help="Dimension of hidden layers for actor/critic networks")
parser.add_argument("--use_stacked_frames", action='store_true',
default=False, help="Whether to use stacked_frames")
parser.add_argument("--hidden_size", type=int, default=256,
help="Dimension of hidden layers for actor/critic networks") # TODO @zoeyuchao. The same comment might in need of change.
parser.add_argument("--layer_N", type=int, default=3,
help="Number of layers for actor/critic networks")
parser.add_argument("--activation_id", type=int,
default=1, help="choose 0 to use tanh, 1 to use relu, 2 to use leaky relu, 3 to use elu")
parser.add_argument("--use_popart", action='store_true', default=False,
help="by default False, use PopArt to normalize rewards.")
parser.add_argument("--use_valuenorm", action='store_false', default=True,
help="by default True, use running mean and std to normalize rewards.")
parser.add_argument("--use_feature_normalization", action='store_false',
default=True, help="Whether to apply layernorm to the inputs")
parser.add_argument("--use_orthogonal", action='store_false', default=True,
help="Whether to use Orthogonal initialization for weights and 0 initialization for biases")
parser.add_argument("--gain", type=float, default=0.01,
help="The gain # of last action layer")
parser.add_argument("--cnn_layers_params", type=str, default=None,
help="The parameters of cnn layer")
parser.add_argument("--use_maxpool2d", action='store_true',
default=False, help="Whether to apply layernorm to the inputs")
# recurrent parameters
parser.add_argument("--use_naive_recurrent_policy", action='store_true',
default=False, help='Whether to use a naive recurrent policy')
parser.add_argument("--use_recurrent_policy", action='store_false',
default=True, help='use a recurrent policy')
parser.add_argument("--recurrent_N", type=int, default=1,
help="The number of recurrent layers.")
parser.add_argument("--data_chunk_length", type=int, default=25,
help="Time length of chunks used to train a recurrent_policy")
parser.add_argument("--use_influence_policy", action='store_true',
default=False, help='use a recurrent policy')
parser.add_argument("--influence_layer_N", type=int, default=1,
help="Number of layers for actor/critic networks")
# optimizer parameters
parser.add_argument("--lr", type=float, default=5e-4,
help='learning rate (default: 5e-4)')
parser.add_argument("--tau", type=float, default=0.995,
help='soft update polyak (default: 0.995)')
parser.add_argument("--critic_lr", type=float, default=5e-4,
help='critic learning rate (default: 5e-4)')
parser.add_argument("--opti_eps", type=float, default=1e-5,
help='RMSprop optimizer epsilon (default: 1e-5)')
parser.add_argument("--weight_decay", type=float, default=0)
# ppo parameters
parser.add_argument("--ppo_epoch", type=int, default=15,
help='number of ppo epochs (default: 15)')
parser.add_argument("--use_policy_vhead",
action='store_true', default=False,
help="by default, do not use policy vhead. if set, use policy vhead.")
parser.add_argument("--use_clipped_value_loss",
action='store_false', default=True,
help="by default, clip loss value. If set, do not clip loss value.")
parser.add_argument("--clip_param", type=float, default=0.2,
help='ppo clip parameter (default: 0.2)')
parser.add_argument("--num_mini_batch", type=int, default=1,
help='number of batches for ppo (default: 1)')
parser.add_argument("--policy_value_loss_coef", type=float,
default=1, help='policy value loss coefficient (default: 0.5)')
parser.add_argument("--entropy_coef", type=float, default=0.01,
help='entropy term coefficient (default: 0.01)')
parser.add_argument("--value_loss_coef", type=float,
default=1, help='value loss coefficient (default: 0.5)')
parser.add_argument("--use_max_grad_norm",
action='store_false', default=True,
help="by default, use max norm of gradients. If set, do not use.")
parser.add_argument("--max_grad_norm", type=float, default=10.0,
help='max norm of gradients (default: 0.5)')
parser.add_argument("--use_gae", action='store_false',
default=True, help='use generalized advantage estimation')
parser.add_argument("--gamma", type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument("--gae_lambda", type=float, default=0.95,
help='gae lambda parameter (default: 0.95)')
parser.add_argument("--use_proper_time_limits", action='store_true',
default=False, help='compute returns taking into account time limits')
parser.add_argument("--use_huber_loss", action='store_false', default=True,
help="by default, use huber loss. If set, do not use huber loss.")
parser.add_argument("--use_value_active_masks",
action='store_false', default=True,
help="by default True, whether to mask useless data in value loss.")
parser.add_argument("--use_policy_active_masks",
action='store_false', default=True,
help="by default True, whether to mask useless data in policy loss.")
parser.add_argument("--huber_delta", type=float,
default=10.0, help=" coefficience of huber loss.")
# save parameters
parser.add_argument("--save_interval", type=int, default=1,
help="time duration between contiunous twice models saving.")
# log parameters
parser.add_argument("--log_interval", type=int, default=5,
help="time duration between contiunous twice log printing.")
# eval parameters
parser.add_argument("--use_eval", action='store_true', default=False,
help="by default, do not start evaluation. If set`, start evaluation alongside with training.")
parser.add_argument("--eval_interval", type=int, default=25,
help="time duration between contiunous twice evaluation progress.")
parser.add_argument("--eval_episodes", type=int, default=64,
help="number of episodes of a single evaluation.")
# pretrained parameters
parser.add_argument("--model_dir", type=str, default=None,
help="by default None. set the path to pretrained model.")
parser.add_argument("--replay_save_dir", type=str, default=None,
help="replay file save dir")
# replay buffer parameters
return parser
| 10,665 | 55.734043 | 146 | py |
TiKick | TiKick-main/tmarl/configs/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/wrappers/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/wrappers/TWrapper/__init__.py | 0 | 0 | 0 | py |
Dataset Card for "ArtifactAI/arxiv_python_research_code"
Dataset Description
https://huggingface.co/datasets/AlgorithmicResearchGroup/arxiv_python_research_code
Dataset Summary
AlgorithmicResearchGroup/arxiv_python_research_code contains over 4.13GB of Python source code files drawn exclusively from repositories referenced in ArXiv papers. The dataset serves as a curated corpus for Code LLMs.
How to use it
from datasets import load_dataset
# full dataset (4.13GB of data)
ds = load_dataset("AlgorithmicResearchGroup/arxiv_python_research_code", split="train")
# dataset streaming (will only download the data as needed)
ds = load_dataset("AlgorithmicResearchGroup/arxiv_python_research_code", streaming=True, split="train")
for sample in iter(ds): print(sample["code"])
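A small addition for illustration (not part of the original card): because the streaming split is a plain iterable, a few records can be inspected without downloading the full 4.13GB. Only the standard library is used here, and the field names are those listed under Data Fields below.
import itertools
for sample in itertools.islice(ds, 3):
    # peek at three records from the streaming dataset created above
    print(sample["repo"], sample["file"], sample["file_length"])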
Dataset Structure
Data Instances
Each data instance corresponds to one file. The content of the file is in the code feature, and other features (repo, file, etc.) provide some metadata.
Data Fields
- repo (string): code repository name.
- file (string): file path in the repository.
- code (string): code within the file.
- file_length (integer): number of characters in the file.
- avg_line_length (float): the average line-length of the file.
- max_line_length (integer): the maximum line-length of the file.
- extension_type (string): file extension.
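For illustration only (this example is not from the original card), the metadata fields can be used to subset the corpus; the repository name "TiKick" comes from the preview rows above, and the line-length threshold is an arbitrary choice.
ds = load_dataset("AlgorithmicResearchGroup/arxiv_python_research_code", split="train")
# keep only TiKick files whose longest line fits within 120 characters
short_tikick = ds.filter(lambda x: x["repo"] == "TiKick" and x["max_line_length"] <= 120)
print(short_tikick.num_rows)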
Data Splits
The dataset has no splits and all data is loaded as train split by default.
Dataset Creation
Source Data
Initial Data Collection and Normalization
34,099 active GitHub repository names were extracted from ArXiv papers, from the archive's inception through July 21st, 2023, totaling 773G of compressed GitHub repositories.
These repositories were then filtered, and the code from every file with a '.py' extension was extracted, yielding 1.4 million files.
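A rough sketch of what the '.py' extraction step could look like is shown below. It is a reconstruction for illustration only, not the actual pipeline; the directory layout and the way avg_line_length is computed are assumptions.
import os

def extract_py_files(repo_name, repo_dir):
    # walk one cloned repository and build records matching the dataset schema
    records = []
    for root, _, files in os.walk(repo_dir):
        for name in files:
            if not name.endswith(".py"):
                continue
            path = os.path.join(root, name)
            with open(path, encoding="utf-8", errors="ignore") as f:
                code = f.read()
            lines = code.splitlines() or [""]
            records.append({
                "repo": repo_name,
                "file": os.path.relpath(path, os.path.dirname(repo_dir)),
                "code": code,
                "file_length": len(code),
                "avg_line_length": sum(len(line) for line in lines) / len(lines),
                "max_line_length": max(len(line) for line in lines),
                "extension_type": "py",
            })
    return records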
Who are the source language producers?
The source (code) language producers are the GitHub users who created the unique repositories referenced in the papers.
Personal and Sensitive Information
The released dataset may contain sensitive information such as emails, IP addresses, and API/ssh keys that have previously been published to public repositories on GitHub.
Additional Information
Dataset Curators
Matthew Kenney, AlgorithmicResearchGroup, matt@algorithmicresearchgroup.com
Citation Information
@misc{arxiv_python_research_code,
title={arxiv_python_research_code},
author={Matthew Kenney},
year={2023}
}