ppo-MountainCar-v0 / pyproject.toml
PPO playing MountainCar-v0 from https://github.com/sgoodfriend/rl-algo-impls/tree/0511de345b17175b7cf1ea706c3e05981f11761c
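
# PEP 621 project metadata for the rl_algo_impls package, built with setuptools.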
[project]
name = "rl_algo_impls"
version = "0.0.8"
description = "Implementations of reinforcement learning algorithms"
authors = [
{name = "Scott Goodfriend", email = "goodfriend.scott@gmail.com"},
]
license = {file = "LICENSE"}
readme = "README.md"
requires-python = ">= 3.8"
classifiers = [
"License :: OSI Approved :: MIT License",
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
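# Runtime dependencies. gym is capped below 0.22 and pyglet pinned to 1.5.27,
# presumably to stay on the pre-Gymnasium gym 0.21 API that stable-baselines3 1.7.x
# targets.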
dependencies = [
"cmake",
"swig",
"scipy",
"torch",
"torchvision",
"tensorboard >= 2.11.2, < 2.12",
"AutoROM.accept-rom-license >= 0.4.2, < 0.5",
"stable-baselines3[extra] >= 1.7.0, < 1.8",
"gym[box2d] >= 0.21.0, < 0.22",
"pyglet == 1.5.27",
"wandb",
"pyvirtualdisplay",
"pybullet",
"tabulate",
"huggingface-hub",
"optuna",
"dash",
"kaleido",
"PyYAML",
"scikit-learn",
]
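
# Only the top-level package is listed explicitly; automatic package discovery is not
# used.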
[tool.setuptools]
packages = ["rl_algo_impls"]

[project.optional-dependencies]
test = [
"pytest",
"black",
"mypy",
"flake8",
"flake8-bugbear",
"isort",
]
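# Procgen benchmark support; the environment marker below restricts procgen to x86_64,
# presumably because prebuilt wheels are only published for that architecture.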
procgen = [
"numexpr >= 2.8.4",
"gym3",
"glfw >= 1.12.0, < 1.13",
"procgen; platform_machine=='x86_64'",
]
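# Two MicroRTS extras: "microrts-old" pins the gym-microrts release that matches
# ppo-implementation-details, while "microrts" tracks a newer version.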
microrts-old = [
"numpy < 1.24.0", # Support for gym-microrts < 0.6.0
"gym-microrts == 0.2.0", # Match ppo-implementation-details
]
microrts = [
"numpy < 1.24.0", # Support for gym-microrts < 0.6.0
"gym-microrts == 0.3.2",
]
jupyter = [
"jupyter",
"notebook"
]
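# Aggregate extra that pulls in the test, procgen, and microrts extras through
# self-referencing requirements (jupyter is not included). Example install from a
# local checkout:
#   pip install -e ".[all]"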
all = [
"rl-algo-impls[test]",
"rl-algo-impls[procgen]",
"rl-algo-impls[microrts]",
]

[project.urls]
"Homepage" = "https://github.com/sgoodfriend/rl-algo-impls"
[build-system]
requires = ["setuptools==65.5.0", "setuptools-scm"]
build-backend = "setuptools.build_meta"
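
# isort's "black" profile keeps import ordering compatible with the black formatter
# listed in the test extra.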
[tool.isort]
profile = "black"