Python ray.is_initialized() Examples
The following are 30 code examples of ray.is_initialized(), which reports whether ray.init() has already been called in the current process. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module ray, or try the search function.
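
Nearly every example below uses the same guard pattern: check ray.is_initialized() before calling ray.init() so startup is idempotent, and check it again before ray.shutdown() so teardown is safe. A minimal sketch of the pattern (the num_cpus value is an illustrative placeholder, not taken from any example below):

import ray

def ensure_ray():
    # Start Ray only if this process has not already connected to an
    # instance; a second unguarded ray.init() would raise an error.
    if not ray.is_initialized():
        ray.init(num_cpus=1)  # num_cpus=1 is an arbitrary example value

def teardown_ray():
    # Guarded shutdown, mirroring the fixtures in the examples below.
    if ray.is_initialized():
        ray.shutdown()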
Example #1
Source File: ray_fixtures.py From garage with MIT License
def ray_local_session_fixture():
    """Initializes Ray and shuts down Ray in local mode.

    Yields:
        None: Yield is for purposes of pytest module style.
            All statements before the yield are a part of module setup, and
            all statements after the yield are a part of module teardown.
    """
    if not ray.is_initialized():
        ray.init(local_mode=True,
                 ignore_reinit_error=True,
                 log_to_driver=False,
                 include_webui=False)
    yield
    if ray.is_initialized():
        ray.shutdown()
Example #2
Source File: test_bc.py From garage with MIT License
def test_bc_point(ray_local_session_fixture):  # NOQA
    del ray_local_session_fixture
    assert ray.is_initialized()
    deterministic.set_seed(100)
    runner = LocalRunner(snapshot_config)
    goal = np.array([1., 1.])
    env = GarageEnv(PointEnv(goal=goal))
    expert = OptimalPolicy(env.spec, goal=goal)
    policy = GaussianMLPPolicy(env.spec, [4])
    batch_size = 400
    algo = BC(env.spec,
              policy,
              batch_size=batch_size,
              source=expert,
              max_path_length=200,
              policy_lr=1e-2,
              loss='log_prob')
    runner.setup(algo, env)
    run_bc(runner, algo, batch_size)
Example #3
Source File: test_bc.py From garage with MIT License
def test_bc_point_deterministic(ray_local_session_fixture):  # NOQA
    del ray_local_session_fixture
    assert ray.is_initialized()
    deterministic.set_seed(100)
    runner = LocalRunner(snapshot_config)
    goal = np.array([1., 1.])
    env = GarageEnv(PointEnv(goal=goal))
    expert = OptimalPolicy(env.spec, goal=goal)
    policy = DeterministicMLPPolicy(env.spec, hidden_sizes=[8, 8])
    batch_size = 600
    algo = BC(env.spec,
              policy,
              batch_size=batch_size,
              source=expert,
              max_path_length=200,
              policy_lr=1e-2,
              loss='mse')
    runner.setup(algo, env)
    run_bc(runner, algo, batch_size)
Example #4
Source File: test_ray_batched_sampler.py From garage with MIT License
def test_init_with_env_updates(ray_local_session_fixture):
    del ray_local_session_fixture
    assert ray.is_initialized()
    max_path_length = 16
    env = GarageEnv(PointEnv())
    policy = FixedPolicy(env.spec,
                         scripted_actions=[
                             env.action_space.sample()
                             for _ in range(max_path_length)
                         ])
    tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
    n_workers = 8
    workers = WorkerFactory(seed=100,
                            max_path_length=max_path_length,
                            n_workers=n_workers)
    sampler = RaySampler.from_worker_factory(workers,
                                             policy,
                                             envs=tasks.sample(n_workers))
    rollouts = sampler.obtain_samples(0, 160, policy)
    assert sum(rollouts.lengths) >= 160
Example #5
Source File: test_local_tf_runner.py From garage with MIT License
def test_make_sampler_ray_sampler(self, ray_session_fixture):
    del ray_session_fixture
    assert ray.is_initialized()
    with LocalTFRunner(snapshot_config) as runner:
        env = GarageEnv(env_name='CartPole-v1')
        policy = CategoricalMLPPolicy(name='policy',
                                      env_spec=env.spec,
                                      hidden_sizes=(8, 8))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = VPG(env_spec=env.spec,
                   policy=policy,
                   baseline=baseline,
                   max_path_length=100,
                   discount=0.99,
                   optimizer_args=dict(learning_rate=0.01, ))
        runner.setup(algo, env, sampler_cls=RaySampler)
        assert isinstance(runner._sampler, RaySampler)
        runner.train(n_epochs=1, batch_size=10)
Example #6
Source File: ray_fixtures.py From garage with MIT License
def ray_session_fixture():
    """Initializes Ray and shuts down Ray.

    Yields:
        None: Yield is for purposes of pytest module style.
            All statements before the yield are a part of module setup, and
            all statements after the yield are a part of module teardown.
    """
    if not ray.is_initialized():
        ray.init(memory=52428800,
                 object_store_memory=78643200,
                 ignore_reinit_error=True,
                 log_to_driver=False,
                 include_webui=False)
    yield
    if ray.is_initialized():
        ray.shutdown()
Example #7
Source File: test_examples.py From flow with MIT License
def setUp(self):
    if not ray.is_initialized():
        ray.init(num_cpus=1)
Example #8
Source File: remote_sampler.py From mbpo with MIT License
def _create_remote_environment(self, env, policy):
    env_pkl = pickle.dumps(env)
    policy_pkl = pickle.dumps(policy)

    if not ray.is_initialized():
        ray.init()

    self._remote_environment = _RemoteEnv.remote(env_pkl, policy_pkl)

    # Block until the env and policy are ready.
    initialized = ray.get(self._remote_environment.initialized.remote())
    assert initialized, initialized
Example #9
Source File: ray_sampler.py From garage with MIT License
def __init__(self, worker_factory, agents, envs):
    # pylint: disable=super-init-not-called
    if not ray.is_initialized():
        ray.init(log_to_driver=False)
    self._sampler_worker = ray.remote(SamplerWorker)
    self._worker_factory = worker_factory
    self._agents = agents
    self._envs = self._worker_factory.prepare_worker_messages(envs)
    self._all_workers = defaultdict(None)
    self._workers_started = False
    self.start_worker()
Example #10
Source File: vecenv.py From tianshou with MIT License
def __init__(self, env_fns: List[Callable[[], gym.Env]]) -> None:
    super().__init__(env_fns)
    try:
        if not ray.is_initialized():
            ray.init()
    except NameError:
        raise ImportError(
            'Please install ray to support RayVectorEnv: pip3 install ray')
    self.envs = [
        ray.remote(gym.Wrapper).options(num_cpus=0).remote(e())
        for e in env_fns
    ]
Example #11
Source File: utils.py From machina with MIT License
def __init__(self, trainer_cls, num_trainer=1, master_address=None, **kwargs):
    if not ray.is_initialized():
        init_ray()
    self.trainers = [
        trainer_cls.as_remote().remote(**kwargs,
                                       rank=i,
                                       world_size=num_trainer,
                                       master_address=master_address)
        for i in range(num_trainer)
    ]
Example #12
Source File: raysampler.py From machina with MIT License
def __init__(self, env, pol, num_parallel=8, prepro=None, seed=256,
             worker_cls=None, node_info={}):
    if not ray.is_initialized():
        logger.log(
            "Ray is not initialized. Initialize ray with no GPU resources")
        init_ray()
    pol = copy.deepcopy(pol)
    pol.to('cpu')
    pol = ray.put(pol)
    env = ray.put(env)
    resources = []
    for k, v in node_info.items():
        for _ in range(v):
            resources.append({k: 1})
    assert len(resources) <= num_parallel
    if len(resources) < num_parallel:
        for _ in range(num_parallel - len(resources)):
            resources.append(None)
    if worker_cls is None:
        worker_cls = DefaultSampleWorker
    self.workers = [
        worker_cls.as_remote(resources=r).remote(pol, env, seed, i, prepro)
        for i, r in zip(range(num_parallel), resources)
    ]
Example #13
Source File: http_proxy.py From ray with Apache License 2.0
async def fetch_config_from_master(self):
    # Must be declared async: the master actor calls below are awaited.
    assert ray.is_initialized()
    master = serve.api._get_master_actor()

    self.route_table, [self.router_handle] = (
        await master.get_http_proxy_config.remote())

    # The exporter is required to return results for the /-/metrics endpoint.
    [self.metric_exporter] = await master.get_metric_exporter.remote()
    self.metric_client = MetricClient(self.metric_exporter)
    self.request_counter = self.metric_client.new_counter(
        "num_http_requests",
        description="The number of requests processed",
        label_names=("route", ))
Example #14
Source File: pool.py From ray with Apache License 2.0
def _init_ray(self, processes=None, ray_address=None):
    # Initialize ray. If ray is already initialized, we do nothing.
    # Else, the priority is:
    # ray_address argument > RAY_ADDRESS > start new local cluster.
    if not ray.is_initialized():
        # Cluster mode.
        if ray_address is None and RAY_ADDRESS_ENV in os.environ:
            logger.info("Connecting to ray cluster at address='{}'".format(
                os.environ[RAY_ADDRESS_ENV]))
            ray.init()
        elif ray_address is not None:
            logger.info("Connecting to ray cluster at address='{}'".format(
                ray_address))
            ray.init(address=ray_address)
        # Local mode.
        else:
            logger.info("Starting local ray cluster")
            ray.init(num_cpus=processes)

    ray_cpus = int(ray.state.cluster_resources()["CPU"])
    if processes is None:
        processes = ray_cpus
    if processes <= 0:
        raise ValueError("Processes in the pool must be >0.")
    if ray_cpus < processes:
        raise ValueError("Tried to start a pool with {} processes on an "
                         "existing ray cluster, but there are only {} "
                         "CPUs in the ray cluster.".format(
                             processes, ray_cpus))
    return processes
Example #15
Source File: ray_backend.py From ray with Apache License 2.0
def configure(self,
              n_jobs=1,
              parallel=None,
              prefer=None,
              require=None,
              **memmappingpool_args):
    """Make Ray Pool the parent class of PicklingPool.

    PicklingPool inherits Pool from multiprocessing.pool. The next line is a
    patch, which changes the inheritance of Pool to be from
    ray.util.multiprocessing.pool.
    """
    PicklingPool.__bases__ = (Pool, )
    """Use all available resources when n_jobs == -1. Must set RAY_ADDRESS
    variable in the environment or run ray.init(address=..) to run on
    multiple nodes.
    """
    if n_jobs == -1:
        if not ray.is_initialized():
            import os
            if "RAY_ADDRESS" in os.environ:
                logger.info(
                    "Connecting to ray cluster at address='{}'".format(
                        os.environ["RAY_ADDRESS"]))
            else:
                logger.info("Starting local ray cluster")
            ray.init()
        ray_cpus = int(ray.state.cluster_resources()["CPU"])
        n_jobs = ray_cpus
    eff_n_jobs = super(RayBackend, self).configure(n_jobs, parallel, prefer,
                                                   require,
                                                   **memmappingpool_args)
    return eff_n_jobs
Example #16
Source File: test_multiprocessing.py From ray with Apache License 2.0
def test_ray_init(shutdown_only):
    def getpid(args):
        return os.getpid()

    def check_pool_size(pool, size):
        args = [tuple() for _ in range(size)]
        assert len(set(pool.map(getpid, args))) == size

    # Check that starting a pool starts ray if not initialized.
    pool = Pool(processes=2)
    assert ray.is_initialized()
    assert int(ray.state.cluster_resources()["CPU"]) == 2
    check_pool_size(pool, 2)
    pool.terminate()
    pool.join()
    ray.shutdown()

    # Check that starting a pool doesn't affect ray if there is a local
    # ray cluster running.
    ray.init(num_cpus=3)
    assert ray.is_initialized()
    pool = Pool(processes=2)
    assert int(ray.state.cluster_resources()["CPU"]) == 3
    check_pool_size(pool, 2)
    pool.terminate()
    pool.join()
    ray.shutdown()

    # Check that trying to start a pool on an existing ray cluster throws an
    # error if there aren't enough CPUs for the number of processes.
    ray.init(num_cpus=1)
    assert ray.is_initialized()
    with pytest.raises(ValueError):
        Pool(processes=2)
    assert int(ray.state.cluster_resources()["CPU"]) == 1
    ray.shutdown()
Example #17
Source File: test_joblib.py From ray with Apache License 2.0
def test_svm_multiple_nodes(ray_start_cluster_2_nodes):
    digits = load_digits()
    param_space = {
        "C": np.logspace(-6, 6, 30),
        "gamma": np.logspace(-8, 8, 30),
        "tol": np.logspace(-4, -1, 30),
        "class_weight": [None, "balanced"],
    }

    model = SVC(kernel="rbf")
    search = RandomizedSearchCV(model, param_space, cv=5, n_iter=2, verbose=10)
    register_ray()
    with joblib.parallel_backend("ray"):
        search.fit(digits.data, digits.target)
    assert ray.is_initialized()
Example #18
Source File: test_joblib.py From ray with Apache License 2.0
def test_svm_single_node(shutdown_only):
    digits = load_digits()
    param_space = {
        "C": np.logspace(-6, 6, 10),
        "gamma": np.logspace(-8, 8, 10),
        "tol": np.logspace(-4, -1, 3),
        "class_weight": [None, "balanced"],
    }

    model = SVC(kernel="rbf")
    search = RandomizedSearchCV(model, param_space, cv=3, n_iter=2, verbose=10)
    register_ray()
    with joblib.parallel_backend("ray"):
        search.fit(digits.data, digits.target)
    assert ray.is_initialized()
Example #19
Source File: test_joblib.py From ray with Apache License 2.0
def test_register_ray():
    register_ray()
    assert "ray" in joblib.parallel.BACKENDS
    assert not ray.is_initialized()
Example #20
Source File: test_advanced_3.py From ray with Apache License 2.0
def test_initialized_local_mode(shutdown_only_with_initialization_check):
    assert not ray.is_initialized()
    ray.init(num_cpus=0, local_mode=True)
    assert ray.is_initialized()
Example #21
Source File: test_advanced_3.py From ray with Apache License 2.0
def shutdown_only_with_initialization_check():
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
    assert not ray.is_initialized()
Example #22
Source File: test_tune_restore.py From ray with Apache License 2.0
def testTuneRestore(self):
    self.assertFalse(ray.is_initialized())
    tune.run(
        "__fake",
        name="TestAutoInit",
        stop={"training_iteration": 1},
        ray_auto_init=True)
    self.assertTrue(ray.is_initialized())
Example #23
Source File: ray_trial_executor.py From ray with Apache License 2.0
def __init__(self,
             queue_trials=False,
             reuse_actors=False,
             ray_auto_init=False,
             refresh_period=RESOURCE_REFRESH_PERIOD):
    super(RayTrialExecutor, self).__init__(queue_trials)
    # Check whether we are launching a trial without resources to kick off
    # the autoscaler.
    self._trial_queued = False
    self._running = {}
    # A trial resumed after pausing should not call trial.train.remote()
    # again, so no new remote object IDs are generated. We use self._paused
    # to store paused trials here.
    self._paused = {}
    self._trial_cleanup = _TrialCleanup()
    self._reuse_actors = reuse_actors
    self._cached_actor = None

    self._avail_resources = Resources(cpu=0, gpu=0)
    self._committed_resources = Resources(cpu=0, gpu=0)
    self._resources_initialized = False
    self._refresh_period = refresh_period
    self._last_resource_refresh = float("-inf")
    self._last_nontrivial_wait = time.time()
    if not ray.is_initialized() and ray_auto_init:
        logger.info("Initializing Ray automatically. "
                    "For cluster usage or custom Ray initialization, "
                    "call `ray.init(...)` before `tune.run`.")
        ray.init()

    if ray.is_initialized():
        self._update_avail_resources()
Example #24
Source File: async_api.py From ray with Apache License 2.0
def init():
    """Initialize plasma event handlers for asyncio support."""
    assert ray.is_initialized(), "Please call ray.init before async_api.init"

    global handler
    if handler is None:
        worker = ray.worker.global_worker
        loop = asyncio.get_event_loop()
        handler = PlasmaEventHandler(loop, worker)
        worker.core_worker.set_plasma_added_callback(handler)
        logger.debug("AsyncPlasma Connection Created!")
Example #25
Source File: test_ray_batched_sampler.py From garage with MIT License
def test_obtain_exact_trajectories(ray_local_session_fixture):
    del ray_local_session_fixture
    assert ray.is_initialized()
    max_path_length = 15
    n_workers = 8
    env = GarageEnv(PointEnv())
    per_worker_actions = [env.action_space.sample() for _ in range(n_workers)]
    policies = [
        FixedPolicy(env.spec, [action] * max_path_length)
        for action in per_worker_actions
    ]
    workers = WorkerFactory(seed=100,
                            max_path_length=max_path_length,
                            n_workers=n_workers)
    sampler = RaySampler.from_worker_factory(workers, policies, envs=env)
    n_traj_per_worker = 3
    rollouts = sampler.obtain_exact_trajectories(n_traj_per_worker, policies)
    # At least one action per trajectory.
    assert sum(rollouts.lengths) >= n_workers * n_traj_per_worker
    # All of the trajectories.
    assert len(rollouts.lengths) == n_workers * n_traj_per_worker
    worker = -1
    for count, rollout in enumerate(rollouts.split()):
        if count % n_traj_per_worker == 0:
            worker += 1
        assert (rollout.actions == per_worker_actions[worker]).all()
Example #26
Source File: test_ray_batched_sampler.py From garage with MIT License
def test_update_envs_env_update(ray_local_session_fixture):
    del ray_local_session_fixture
    assert ray.is_initialized()
    max_path_length = 16
    env = GarageEnv(PointEnv())
    policy = FixedPolicy(env.spec,
                         scripted_actions=[
                             env.action_space.sample()
                             for _ in range(max_path_length)
                         ])
    tasks = SetTaskSampler(PointEnv)
    n_workers = 8
    workers = WorkerFactory(seed=100,
                            max_path_length=max_path_length,
                            n_workers=n_workers)
    sampler = RaySampler.from_worker_factory(workers, policy, env)
    rollouts = sampler.obtain_samples(0, 160,
                                      np.asarray(policy.get_param_values()),
                                      env_update=tasks.sample(n_workers))
    mean_rewards = []
    goals = []
    for rollout in rollouts.split():
        mean_rewards.append(rollout.rewards.mean())
        goals.append(rollout.env_infos['task'][0]['goal'])
    assert np.var(mean_rewards) > 0
    assert np.var(goals) > 0
    with pytest.raises(ValueError):
        sampler.obtain_samples(0, 10,
                               np.asarray(policy.get_param_values()),
                               env_update=tasks.sample(n_workers + 1))
Example #27
Source File: test_ray_batched_sampler_tf.py From garage with MIT License
def test_ray_batch_sampler(self, ray_local_session_fixture):
    del ray_local_session_fixture
    assert ray.is_initialized()
    workers = WorkerFactory(seed=100,
                            max_path_length=self.algo.max_path_length)
    sampler1 = RaySampler(workers, self.policy, self.env)
    sampler1.start_worker()
    sampler1.shutdown_worker()
Example #28
Source File: util.py From ray with Apache License 2.0
def validate_save_restore(trainable_cls,
                          config=None,
                          num_gpus=0,
                          use_object_store=False):
    """Helper method to check if your Trainable class will resume correctly.

    Args:
        trainable_cls: Trainable class for evaluation.
        config (dict): Config to pass to Trainable when testing.
        num_gpus (int): GPU resources to allocate when testing.
        use_object_store (bool): Whether to save and restore to Ray's object
            store. Recommended to set this to True if planning to use
            algorithms that pause training (i.e., PBT, HyperBand).
    """
    assert ray.is_initialized(), "Need Ray to be initialized."
    remote_cls = ray.remote(num_gpus=num_gpus)(trainable_cls)

    trainable_1 = remote_cls.remote(config=config)
    trainable_2 = remote_cls.remote(config=config)

    from ray.tune.result import TRAINING_ITERATION

    for _ in range(3):
        res = ray.get(trainable_1.train.remote())

    assert res.get(TRAINING_ITERATION), (
        "Validation will not pass because it requires `training_iteration` "
        "to be returned.")

    if use_object_store:
        restore_check = trainable_2.restore_from_object.remote(
            trainable_1.save_to_object.remote())
        ray.get(restore_check)
    else:
        restore_check = ray.get(
            trainable_2.restore.remote(trainable_1.save.remote()))

    res = ray.get(trainable_2.train.remote())
    assert res[TRAINING_ITERATION] == 4

    res = ray.get(trainable_2.train.remote())
    assert res[TRAINING_ITERATION] == 5
    return True
Example #29
Source File: api.py From ray with Apache License 2.0
def init(name=None,
         http_host=DEFAULT_HTTP_HOST,
         http_port=DEFAULT_HTTP_PORT,
         metric_exporter=InMemoryExporter):
    """Initialize or connect to a serve cluster.

    If the serve cluster is already initialized, this function will just
    return.

    If `ray.init` has not been called in this process, it will be called with
    no arguments. To specify kwargs to `ray.init`, it should be called
    separately before calling `serve.init`.

    Args:
        name (str): A unique name for this serve instance. This allows
            multiple serve instances to run on the same ray cluster. Must be
            specified in all subsequent serve.init() calls.
        http_host (str): Host for HTTP server. Default to "0.0.0.0".
        http_port (int): Port for HTTP server. Default to 8000.
        metric_exporter (ExporterInterface): The class aggregates metrics
            from all RayServe actors and optionally exports them to external
            services. RayServe has two options built in: InMemoryExporter
            and PrometheusExporter.
    """
    if name is not None and not isinstance(name, str):
        raise TypeError("name must be a string.")

    # Initialize ray if needed.
    if not ray.is_initialized():
        ray.init()

    # Try to get the serve master actor if it exists.
    global master_actor
    master_actor_name = format_actor_name(SERVE_MASTER_NAME, name)
    try:
        master_actor = ray.get_actor(master_actor_name)
        return
    except ValueError:
        pass

    # Register serialization context once.
    ray.register_custom_serializer(Query, Query.ray_serialize,
                                   Query.ray_deserialize)
    ray.register_custom_serializer(RequestMetadata,
                                   RequestMetadata.ray_serialize,
                                   RequestMetadata.ray_deserialize)

    # TODO(edoakes): for now, always start the HTTP proxy on the node that
    # serve.init() was run on. We should consider making this configurable
    # in the future.
    http_node_id = ray.state.current_node_id()
    master_actor = ServeMaster.options(
        name=master_actor_name,
        max_restarts=-1,
        max_task_retries=-1,
    ).remote(name, http_node_id, http_host, http_port, metric_exporter)

    block_until_http_ready(
        "http://{}:{}/-/routes".format(http_host, http_port),
        timeout=HTTP_PROXY_TIMEOUT)
Example #30
Source File: test_hybrid_stream.py From ray with Apache License 2.0
def test_hybrid_stream():
    subprocess.check_call(
        ["bazel", "build", "//streaming/java:all_streaming_tests_deploy.jar"])
    current_dir = os.path.abspath(os.path.dirname(__file__))
    jar_path = os.path.join(
        current_dir,
        "../../../bazel-bin/streaming/java/all_streaming_tests_deploy.jar")
    jar_path = os.path.abspath(jar_path)
    print("jar_path", jar_path)
    java_worker_options = json.dumps(["-classpath", jar_path])
    print("java_worker_options", java_worker_options)
    assert not ray.is_initialized()
    ray.init(
        load_code_from_local=True,
        include_java=True,
        java_worker_options=java_worker_options,
        _internal_config=json.dumps({
            "num_workers_per_process_java": 1
        }))

    sink_file = "/tmp/ray_streaming_test_hybrid_stream.txt"
    if os.path.exists(sink_file):
        os.remove(sink_file)

    def sink_func(x):
        print("HybridStreamTest", x)
        with open(sink_file, "a") as f:
            f.write(str(x))

    ctx = StreamingContext.Builder().build()
    ctx.from_values("a", "b", "c") \
        .as_java_stream() \
        .map("io.ray.streaming.runtime.demo.HybridStreamTest$Mapper1") \
        .filter("io.ray.streaming.runtime.demo.HybridStreamTest$Filter1") \
        .as_python_stream() \
        .sink(sink_func)
    ctx.submit("HybridStreamTest")

    import time
    time.sleep(3)
    ray.shutdown()
    with open(sink_file, "r") as f:
        result = f.read()
    assert "a" in result
    assert "b" not in result
    assert "c" in result