[tool.poetry]
name = "cleanrl"
version = "1.1.0"
description = "High-quality single file implementation of Deep Reinforcement Learning algorithms with research-friendly features"
authors = ["Costa Huang <costa.huang@outlook.com>"]
packages = [
    { include = "cleanrl" },
    { include = "cleanrl_utils" },
]
keywords = ["reinforcement", "machine", "learning", "research"]
license = "MIT"
readme = "README.md"

[tool.poetry.dependencies]
python = ">=3.7.1,<3.11"
tensorboard = "^2.10.0"
wandb = "^0.13.11"
gym = "0.23.1"
torch = ">=1.12.1"
stable-baselines3 = "1.2.0"
gymnasium = ">=0.28.1"
moviepy = "^1.0.3"
pygame = "2.1.0"
huggingface-hub = "^0.11.1"
rich = "<12.0"
tenacity = "^8.2.2"
ale-py = {version = "0.7.4", optional = true}
AutoROM = {extras = ["accept-rom-license"], version = "^0.4.2", optional = true}
opencv-python = {version = "^4.6.0.66", optional = true}
procgen = {version = "^0.10.7", optional = true}
pytest = {version = "^7.1.3", optional = true}
mujoco = {version = "<=2.3.3", optional = true}
imageio = {version = "^2.14.1", optional = true}
free-mujoco-py = {version = "^2.1.6", optional = true}
mkdocs-material = {version = "^8.4.3", optional = true}
markdown-include = {version = "^0.7.0", optional = true}
openrlbenchmark = {version = "^0.1.1b4", optional = true}
jax = {version = "^0.3.17", optional = true}
jaxlib = {version = "^0.3.15", optional = true}
flax = {version = "^0.6.0", optional = true}
optuna = {version = "^3.0.1", optional = true}
optuna-dashboard = {version = "^0.7.2", optional = true}
envpool = {version = "^0.6.4", optional = true}
PettingZoo = {version = "1.18.1", optional = true}
SuperSuit = {version = "3.4.0", optional = true}
multi-agent-ale-py = {version = "0.1.11", optional = true}
boto3 = {version = "^1.24.70", optional = true}
awscli = {version = "^1.25.71", optional = true}
shimmy = {version = ">=1.0.0", extras = ["dm-control"], optional = true}
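# note: the packages marked `optional = true` above are only installed when the
# corresponding extra from [tool.poetry.extras] (defined below) is requested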

[tool.poetry.group.dev.dependencies]
pre-commit = "^2.20.0"

[tool.poetry.group.isaacgym]
optional = true

[tool.poetry.group.isaacgym.dependencies]
isaacgymenvs = {git = "https://github.com/vwxyzjn/IsaacGymEnvs.git", rev = "poetry", python = ">=3.7.1,<3.10"}
isaacgym = {path = "cleanrl/ppo_continuous_action_isaacgym/isaacgym", develop = true}
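# example (assuming Poetry >= 1.2, which the dependency-group syntax above already
# requires): the optional `isaacgym` group is skipped by a plain `poetry install`
# and can be pulled in explicitly with
#   poetry install --with isaacgym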

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.poetry.extras]
atari = ["ale-py", "AutoROM", "opencv-python"]
procgen = ["procgen"]
plot = ["pandas", "seaborn"]
pytest = ["pytest"]
mujoco = ["mujoco", "imageio"]
mujoco_py = ["free-mujoco-py"]
jax = ["jax", "jaxlib", "flax"]
docs = ["mkdocs-material", "markdown-include", "openrlbenchmark"]
envpool = ["envpool"]
optuna = ["optuna", "optuna-dashboard"]
pettingzoo = ["PettingZoo", "SuperSuit", "multi-agent-ale-py"]
cloud = ["boto3", "awscli"]
dm_control = ["shimmy", "mujoco"]

# per-algorithm extras (useful when you only want to run a specific algorithm);
# example install commands are shown at the end of this table
dqn = []
dqn_atari = ["ale-py", "AutoROM", "opencv-python"]
dqn_jax = ["jax", "jaxlib", "flax"]
dqn_atari_jax = [
    "ale-py", "AutoROM", "opencv-python", # atari
    "jax", "jaxlib", "flax", # jax
]
c51 = []
c51_atari = ["ale-py", "AutoROM", "opencv-python"]
c51_jax = ["jax", "jaxlib", "flax"]
c51_atari_jax = [
    "ale-py", "AutoROM", "opencv-python", # atari
    "jax", "jaxlib", "flax", # jax
]
ppo_atari_envpool_xla_jax_scan = [
    "ale-py", "AutoROM", "opencv-python", # atari
    "jax", "jaxlib", "flax", # jax
    "envpool", # envpool
]
qdagger_dqn_atari_impalacnn = [
    "ale-py", "AutoROM", "opencv-python",
]
qdagger_dqn_atari_jax_impalacnn = [
    "ale-py", "AutoROM", "opencv-python", # atari
    "jax", "jaxlib", "flax", # jax
]
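
# example install commands (a sketch, assuming the standard Poetry CLI; pick the extras you need):
#   poetry install                    # base dependencies only
#   poetry install -E atari           # ale-py, AutoROM, opencv-python
#   poetry install -E "mujoco jax"    # several extras can be combined in one call
#   poetry install -E dqn_atari_jax   # everything needed for the dqn_atari_jax variant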