From a7fe18d02cff7ae32ddbf78461cac949a121d0bd Mon Sep 17 00:00:00 2001
From: YuriCat
Date: Tue, 25 Jan 2022 11:40:26 +0900
Subject: [PATCH] feature: remove prepare_env()

---
 handyrl/environment.py | 11 -----------
 handyrl/evaluation.py  |  4 +---
 handyrl/train.py       |  3 +--
 handyrl/worker.py      |  3 +--
 4 files changed, 3 insertions(+), 18 deletions(-)

diff --git a/handyrl/environment.py b/handyrl/environment.py
index 9bca1713..4ed7b521 100755
--- a/handyrl/environment.py
+++ b/handyrl/environment.py
@@ -14,17 +14,6 @@
 }
 
 
-def prepare_env(env_args):
-    env_name = env_args['env']
-    env_source = ENVS.get(env_name, env_name)
-    env_module = importlib.import_module(env_source)
-
-    if env_module is None:
-        print("No environment %s" % env_name)
-    elif hasattr(env_module, 'prepare'):
-        env_module.prepare()
-
-
 def make_env(env_args):
     env_name = env_args['env']
     env_source = ENVS.get(env_name, env_name)
diff --git a/handyrl/evaluation.py b/handyrl/evaluation.py
index a6d0ddf1..2cbbe049 100755
--- a/handyrl/evaluation.py
+++ b/handyrl/evaluation.py
@@ -7,7 +7,7 @@
 import time
 import multiprocessing as mp
 
-from .environment import prepare_env, make_env
+from .environment import make_env
 from .connection import send_recv, accept_socket_connections, connect_socket_connection
 from .agent import RandomAgent, RuleBasedAgent, Agent, EnsembleAgent, SoftAgent
 
@@ -296,7 +296,6 @@ def client_mp_child(env_args, model_path, conn):
 
 def eval_main(args, argv):
     env_args = args['env_args']
-    prepare_env(env_args)
     env = make_env(env_args)
 
     model_path = argv[0] if len(argv) >= 1 else 'models/latest.pth'
@@ -322,7 +321,6 @@ def eval_main(args, argv):
 def eval_server_main(args, argv):
     print('network match server mode')
     env_args = args['env_args']
-    prepare_env(env_args)
     env = make_env(env_args)
 
     num_games = int(argv[0]) if len(argv) >= 1 else 100
diff --git a/handyrl/train.py b/handyrl/train.py
index 79ae2afc..b282d431 100755
--- a/handyrl/train.py
+++ b/handyrl/train.py
@@ -21,7 +21,7 @@
 import torch.optim as optim
 import psutil
 
-from .environment import prepare_env, make_env
+from .environment import make_env
 from .util import map_r, bimap_r, trimap_r, rotate
 from .model import to_torch, to_gpu, ModelWrapper
 from .losses import compute_target
@@ -632,7 +632,6 @@ def run(self):
 
 
 def train_main(args):
-    prepare_env(args['env_args'])  # preparing environment is needed in stand-alone mode
     learner = Learner(args=args)
     learner.run()
 
diff --git a/handyrl/worker.py b/handyrl/worker.py
index 58cd12f7..7fdcadc1 100755
--- a/handyrl/worker.py
+++ b/handyrl/worker.py
@@ -13,7 +13,7 @@
 import pickle
 import copy
 
-from .environment import prepare_env, make_env
+from .environment import make_env
 from .connection import QueueCommunicator
 from .connection import send_recv, open_multiprocessing_connections
 from .connection import connect_socket_connection, accept_socket_connections
@@ -240,7 +240,6 @@ def __init__(self, args):
     def run(self):
         args = entry(self.args)
         print(args)
-        prepare_env(args['env'])
 
         # open worker
         process = []
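
Note (not part of the patch): the removed prepare_env() imported the environment module and invoked its optional prepare() hook before make_env() was used. Below is a minimal sketch, assuming only the ENVS mapping shown in handyrl/environment.py above, of how a caller that still relies on that hook could trigger it directly after this removal; prepare_if_needed is a hypothetical helper, not code from this repository.

    import importlib

    # ENVS is the short-name -> module-path table defined in handyrl/environment.py
    # (its closing brace appears as context in the first hunk above).
    from handyrl.environment import ENVS


    def prepare_if_needed(env_args):
        # Resolve the environment module the same way the removed prepare_env() did,
        # then run its optional prepare() hook if the module defines one.
        env_name = env_args['env']
        env_source = ENVS.get(env_name, env_name)
        env_module = importlib.import_module(env_source)
        if hasattr(env_module, 'prepare'):
            env_module.prepare()

Environment modules without a prepare attribute are simply skipped, mirroring the hasattr() check in the removed function.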