Python ray.remote() Examples
The following are 30 code examples of ray.remote(), collected from a range of open-source projects. Each example notes its original project, source file, and license.
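Before the examples, here is a minimal sketch of the two ways ray.remote() appears below: as a decorator that turns a plain function into a remote task, and as a callable with resource arguments that wraps a function or class. The sketch uses the modern Ray API; several of the examples below come from ray-legacy, whose initialization and cleanup calls (start_ray_local, ray.worker.cleanup) differ.

import ray

ray.init()

# As a decorator: f.remote(...) schedules a task and returns an object ref.
@ray.remote
def square(x):
    return x * x

# As a callable with options: wrap an existing class to create an actor class.
class Counter:
    def __init__(self):
        self.n = 0

    def increment(self):
        self.n += 1
        return self.n

RemoteCounter = ray.remote(num_cpus=1)(Counter)
counter = RemoteCounter.remote()

print(ray.get(square.remote(4)))            # 16
print(ray.get(counter.increment.remote()))  # 1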
Example #1
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testAttachingToCluster(self):
    node_ip_address = "127.0.0.1"
    scheduler_port = np.random.randint(40000, 50000)
    scheduler_address = "{}:{}".format(node_ip_address, scheduler_port)
    ray.services.start_scheduler(scheduler_address, cleanup=True)
    time.sleep(0.1)
    ray.services.start_node(scheduler_address, node_ip_address, num_workers=1, cleanup=True)
    ray.init(node_ip_address=node_ip_address, scheduler_address=scheduler_address)

    @ray.remote
    def f(x):
        return x + 1

    self.assertEqual(ray.get(f.remote(0)), 1)
    ray.worker.cleanup()
Example #2
Source File: safelife_logger.py From safelife with Apache License 2.0
def __init__(self, logdir, config_dict=None, **kwargs):
    if ray is None:
        raise ImportError("No module named 'ray'.")
    logger = SafeLifeLogger(logdir, **kwargs)
    self.logdir = logdir
    self.actor = self.SafeLifeLoggingActor.remote(logger, config_dict)
    self._cstats = logger.cumulative_stats.copy()
    # _promises stores references to remote updates to cumulative_stats
    # that will be received in response to having sent a log item. There
    # is no point exposing this state because there is in general no way
    # to get up-to-date statistics to any thread, and therefore no benefit
    # from knowing whether you're waiting for an update.
    self._promises = []
    self._last_update = time.time()
Example #3
Source File: swarm.py From fragile with MIT License
async def step_walkers(self) -> None:
    """
    Make the walkers evolve to their next state sampling an action from the \
    :class:`Model` and applying it to the :class:`Environment`.
    """
    model_states = self.walkers.get("model_states")
    env_states = self.walkers.get("env_states")
    walkers_states = self.walkers.get("states")
    parent_ids = (
        copy.deepcopy(self.walkers.get("id_walkers")) if self.tree is not None else None
    )
    model_states = self.model.predict(
        env_states=env_states, model_states=model_states, walkers_states=walkers_states
    )
    # The function must be declared async for this await to be valid.
    env_states = await self.env.step.remote(model_states=model_states, env_states=env_states)
    # env_states = ray.get(step_id)
    self.walkers.update_states(
        env_states=env_states, model_states=model_states,
    )
    self.update_tree(parent_ids)
Example #4
Source File: alexnet.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def shuffle_pair(first_batch, second_batch):
    """Shuffle two batches of data.

    Args:
        first_batch (Tuple[ObjectID, ObjectID]): The first batch to be shuffled.
            The first component is the object ID of a batch of images, and the
            second component is the object ID of the corresponding batch of labels.
        second_batch (Tuple[ObjectID, ObjectID]): The second batch to be shuffled.
            The first component is the object ID of a batch of images, and the
            second component is the object ID of the corresponding batch of labels.

    Returns:
        Tuple[ObjectID, ObjectID]: The first batch of shuffled data.
        Tuple[ObjectID, ObjectID]: The second batch of shuffled data.
    """
    images1, labels1, images2, labels2 = shuffle_arrays.remote(
        first_batch[0], first_batch[1], second_batch[0], second_batch[1])
    return (images1, labels1), (images2, labels2)
Example #5
Source File: driver.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def full_grad(theta):
    theta_id = ray.put(theta)
    grad_ids = [grad.remote(theta_id, xs_id, ys_id) for (xs_id, ys_id) in batch_ids]
    # This conversion is necessary for use with fmin_l_bfgs_b.
    return sum(ray.get(grad_ids)).astype("float64")

# From the perspective of scipy.optimize.fmin_l_bfgs_b, full_loss is simply a
# function which takes some parameters theta, and computes a loss. Similarly,
# full_grad is a function which takes some parameters theta, and computes the
# gradient of the loss. Internally, these functions use Ray to distribute the
# computation of the loss and the gradient over the data that is represented
# by the remote object IDs x_batches and y_batches and which is potentially
# distributed over a cluster. However, these details are hidden from
# scipy.optimize.fmin_l_bfgs_b, which simply uses them to run the L-BFGS
# algorithm.

# Load the mnist data and turn the data into remote objects.
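As a usage note, the comment block above describes how full_loss and full_grad plug into SciPy's optimizer. A minimal sketch of that call follows, where theta_init is a hypothetical initial parameter vector (full_loss itself appears in Example #28 below):

from scipy.optimize import fmin_l_bfgs_b

# theta_init is a placeholder; its shape depends on the model being trained.
theta_star, final_loss, info = fmin_l_bfgs_b(
    full_loss, theta_init, fprime=full_grad, maxiter=10)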
Example #6
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testComputationGraph(self):
    ray.init(start_ray_local=True, num_workers=1)

    @ray.remote
    def f(x):
        return x

    @ray.remote
    def g(x, y):
        return x, y

    a = f.remote(1)
    b = f.remote(1)
    c = g.remote(a, b)
    c = g.remote(a, 1)
    # Make sure that we can produce a computation_graph visualization.
    ray.visualize_computation_graph(view=False)
    ray.worker.cleanup()
Example #7
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testPythonMode(self):
    reload(test_functions)
    ray.init(start_ray_local=True, driver_mode=ray.PYTHON_MODE)

    @ray.remote
    def f():
        return np.ones([3, 4, 5])

    xref = f.remote()
    assert_equal(xref, np.ones([3, 4, 5]))  # remote functions should return by value
    assert_equal(xref, ray.get(xref))  # ray.get should be the identity
    y = np.random.normal(size=[11, 12])
    assert_equal(y, ray.put(y))  # ray.put should be the identity

    # Make sure objects are immutable. This example is why we need to copy
    # arguments before passing them into remote functions in Python mode.
    aref = test_functions.python_mode_f.remote()
    assert_equal(aref, np.array([0, 0]))
    bref = test_functions.python_mode_g.remote(aref)
    assert_equal(aref, np.array([0, 0]))  # python_mode_g should not mutate aref
    assert_equal(bref, np.array([1, 0]))
    ray.worker.cleanup()
Example #8
Source File: worker.py From dkeras with MIT License
def main(self):
    while True:
        flag, data = ray.get(self.ds.pull.remote())
        packet_id, mode, datatype = flag.split('_')
        if mode == 'STOP':
            break
        if len(data) > 0:
            if mode == 'infer':
                if datatype == 'float':
                    data = np.asarray(data)
                    results = self.model.predict(data, batch_size=self.batch_size)
                    self.ds.push.remote(results, packet_id)
                elif datatype == 'int8':
                    data = np.asarray(data)
                    data = np.float16(data / 255)
                    results = self.model.predict(data, batch_size=self.batch_size)
                    self.ds.push.remote(results, packet_id)
                else:
                    raise UserWarning("Invalid datatype flag {}".format(datatype))
            else:
                raise UserWarning("Invalid mode flag {}".format(mode))
        else:
            time.sleep(self.wait_time)
Example #9
Source File: test_base_experiment.py From gobbli with Apache License 2.0
def test_base_experiment_gpu(tmpdir, request):
    skip_if_no_gpu(request.config)
    tmpdir_path = Path(tmpdir)
    ds = MockDataset.load()
    MockExperiment(
        MockModel,
        ds,
        data_dir=tmpdir_path / "test",
        ray_kwargs={"num_gpus": 1},
        ignore_ray_initialized_error=True,
    )

    # Make sure GPUs are available in a mock remote function.
    # They won't necessarily be available on the master process.
    @ray.remote(num_gpus=1)
    def find_gpus():
        return ray.get_gpu_ids()

    assert len(ray.get(find_gpus.remote())) > 0
Example #10
Source File: hello_ray.py From adeptRL with GNU General Public License v3.0
def main_async():
    import asyncio
    from ray.experimental import async_api

    ray.init(num_cpus=4)
    remote_worker = Worker.remote()
    loop = asyncio.get_event_loop()
    t_zero = time.time()
    tasks = [
        async_api.as_future(remote_worker.sleep.remote(i)) for i in range(1, 3)
    ]
    # gather expects futures as separate arguments, hence the unpacking.
    loop.run_until_complete(asyncio.gather(*tasks))
    print("delta", time.time() - t_zero)
Example #11
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testAttachingToClusterWithMultipleObjectStores(self):
    node_ip_address = "127.0.0.1"
    scheduler_port = np.random.randint(40000, 50000)
    scheduler_address = "{}:{}".format(node_ip_address, scheduler_port)
    ray.services.start_scheduler(scheduler_address, cleanup=True)
    time.sleep(0.1)
    ray.services.start_node(scheduler_address, node_ip_address, num_workers=5, cleanup=True)
    ray.services.start_node(scheduler_address, node_ip_address, num_workers=5, cleanup=True)
    ray.services.start_node(scheduler_address, node_ip_address, num_workers=5, cleanup=True)
    ray.init(node_ip_address=node_ip_address, scheduler_address=scheduler_address)

    @ray.remote
    def f(x):
        return x + 1

    self.assertEqual(ray.get(f.remote(0)), 1)
    ray.worker.cleanup()
Example #12
Source File: rollout_worker.py From adeptRL with GNU General Public License v3.0
def as_remote(
    cls,
    num_cpus=None,
    num_gpus=None,
    memory=None,
    object_store_memory=None,
    resources=None,
):
    # Worker can't use more than 1 GPU, but can also be CPU-only.
    assert num_gpus is None or num_gpus <= 1
    return ray.remote(
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        memory=memory,
        object_store_memory=object_store_memory,
        resources=resources,
    )(cls)
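A brief usage note on this pattern: as_remote() returns a Ray actor class, so instances are created with .remote(). The resource values and constructor arguments below are placeholders, not taken from adeptRL:

# Hypothetical usage; RolloutWorker's real constructor takes its own arguments.
RemoteRolloutWorker = RolloutWorker.as_remote(num_cpus=1, num_gpus=0)
worker = RemoteRolloutWorker.remote()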
Example #13
Source File: core.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def tril(a):
    if a.ndim != 2:
        raise Exception("Input must have 2 dimensions, but a.ndim is " + str(a.ndim))
    result = DistArray(a.shape)
    for (i, j) in np.ndindex(*result.num_blocks):
        if i > j:
            result.objectids[i, j] = ra.copy.remote(a.objectids[i, j])
        elif i == j:
            result.objectids[i, j] = ra.tril.remote(a.objectids[i, j])
        else:
            result.objectids[i, j] = ra.zeros_like.remote(a.objectids[i, j])
    return result
Example #14
Source File: failure_test.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testUnknownSerialization(self):
    reload(test_functions)
    ray.init(start_ray_local=True, num_workers=1, driver_mode=ray.SILENT_MODE)
    test_functions.test_unknown_type.remote()
    time.sleep(0.2)
    task_info = ray.task_info()
    self.assertEqual(len(task_info["failed_tasks"]), 1)
    self.assertEqual(len(task_info["running_tasks"]), 0)
    ray.worker.cleanup()
Example #15
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testUsingReusablesOnDriver(self):
    ray.init(start_ray_local=True, num_workers=1)

    # Test that we can add a variable to the key-value store.
    def foo_initializer():
        return []

    def foo_reinitializer(foo):
        return []

    ray.reusables.foo = ray.Reusable(foo_initializer, foo_reinitializer)

    @ray.remote
    def use_foo():
        foo = ray.reusables.foo
        foo.append(1)
        return foo

    # Check that running a remote function does not reset the reusable
    # variable on the driver.
    foo = ray.reusables.foo
    self.assertEqual(foo, [])
    foo.append(2)
    self.assertEqual(foo, [2])
    foo.append(3)
    self.assertEqual(foo, [2, 3])
    self.assertEqual(ray.get(use_foo.remote()), [1])
    self.assertEqual(ray.get(use_foo.remote()), [1])
    self.assertEqual(ray.get(use_foo.remote()), [1])
    # Check that the copy of foo on the driver has not changed.
    self.assertEqual(foo, [2, 3])
    foo = ray.reusables.foo
    self.assertEqual(foo, [2, 3])
    ray.worker.cleanup()
Example #16
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testReusableVariablesInPythonMode(self):
    reload(test_functions)
    ray.init(start_ray_local=True, driver_mode=ray.PYTHON_MODE)

    def l_init():
        return []

    def l_reinit(l):
        return []

    ray.reusables.l = ray.Reusable(l_init, l_reinit)

    @ray.remote
    def use_l():
        l = ray.reusables.l
        l.append(1)
        return l

    # Get the local copy of the reusable variable. This should be stateful.
    l = ray.reusables.l
    assert_equal(l, [])
    # Make sure the remote function does what we expect.
    assert_equal(ray.get(use_l.remote()), [1])
    assert_equal(ray.get(use_l.remote()), [1])
    # Make sure the local copy of the reusable variable has not been mutated.
    assert_equal(l, [])
    l = ray.reusables.l
    assert_equal(l, [])
    # Make sure that running a remote function does not reset the state of the
    # local copy of the reusable variable.
    l.append(2)
    assert_equal(ray.get(use_l.remote()), [1])
    assert_equal(l, [2])
    ray.worker.cleanup()
Example #17
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testCachingFunctionsToRun(self):
    # Test that we export functions to run on all workers before the driver
    # is connected.
    def f(worker):
        sys.path.append(1)
    ray.worker.global_worker.run_function_on_all_workers(f)

    def f(worker):
        sys.path.append(2)
    ray.worker.global_worker.run_function_on_all_workers(f)

    def g(worker):
        sys.path.append(3)
    ray.worker.global_worker.run_function_on_all_workers(g)

    def f(worker):
        sys.path.append(4)
    ray.worker.global_worker.run_function_on_all_workers(f)

    ray.init(start_ray_local=True, num_workers=2)

    @ray.remote
    def get_state():
        time.sleep(1)
        return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]

    res1 = get_state.remote()
    res2 = get_state.remote()
    self.assertEqual(ray.get(res1), (1, 2, 3, 4))
    self.assertEqual(ray.get(res2), (1, 2, 3, 4))

    # Clean up the path on the workers.
    def f(worker):
        sys.path.pop()
        sys.path.pop()
        sys.path.pop()
        sys.path.pop()
    ray.worker.global_worker.run_function_on_all_workers(f)
    ray.worker.cleanup()
Example #18
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testCachingReusables(self):
    # Test that we can define reusable variables before the driver is
    # connected.
    def foo_initializer():
        return 1

    def bar_initializer():
        return []

    def bar_reinitializer(bar):
        return []

    ray.reusables.foo = ray.Reusable(foo_initializer)
    ray.reusables.bar = ray.Reusable(bar_initializer, bar_reinitializer)

    @ray.remote
    def use_foo():
        return ray.reusables.foo

    @ray.remote
    def use_bar():
        ray.reusables.bar.append(1)
        return ray.reusables.bar

    ray.init(start_ray_local=True, num_workers=2)
    self.assertEqual(ray.get(use_foo.remote()), 1)
    self.assertEqual(ray.get(use_foo.remote()), 1)
    self.assertEqual(ray.get(use_bar.remote()), [1])
    self.assertEqual(ray.get(use_bar.remote()), [1])
    ray.worker.cleanup()
Example #19
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testWait(self):
    ray.init(start_ray_local=True, num_workers=1)

    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1

    objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    ready_ids, remaining_ids = ray.wait(objectids)
    self.assertTrue(len(ready_ids) == 1)
    self.assertTrue(len(remaining_ids) == 3)
    ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
    self.assertEqual(ready_ids, objectids)
    self.assertEqual(remaining_ids, [])

    objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    start_time = time.time()
    ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
    self.assertTrue(time.time() - start_time < 2)
    self.assertEqual(len(ready_ids), 3)
    self.assertEqual(len(remaining_ids), 1)
    ray.wait(objectids)

    objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    start_time = time.time()
    ready_ids, remaining_ids = ray.wait(objectids, timeout=5)
    self.assertTrue(time.time() - start_time < 5)
    self.assertEqual(len(ready_ids), 1)
    self.assertEqual(len(remaining_ids), 3)
    ray.worker.cleanup()
Example #20
Source File: runtest.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def testReferenceCountNone(self):
    ray.init(start_ray_local=True, num_workers=1)

    # Make sure that we aren't accidentally messing up Python's reference
    # counts.
    @ray.remote
    def f():
        return sys.getrefcount(None)

    first_count = ray.get(f.remote())
    second_count = ray.get(f.remote())
    self.assertEqual(first_count, second_count)
    ray.worker.cleanup()
Example #21
Source File: core.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def ones(shape, dtype_name="float"):
    result = DistArray(shape)
    for index in np.ndindex(*result.num_blocks):
        result.objectids[index] = ra.ones.remote(
            DistArray.compute_block_shape(index, shape), dtype_name=dtype_name)
    return result
Example #22
Source File: core.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def zeros(shape, dtype_name="float"):
    result = DistArray(shape)
    for index in np.ndindex(*result.num_blocks):
        result.objectids[index] = ra.zeros.remote(
            DistArray.compute_block_shape(index, shape), dtype_name=dtype_name)
    return result
Example #23
Source File: linalg.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def tsqr_hr(a):
    """Algorithm 6 from http://www.eecs.berkeley.edu/Pubs/TechRpts/2013/EECS-2013-175.pdf"""
    q, r_temp = tsqr.remote(a)
    y, u, s = modified_lu.remote(q)
    y_blocked = ray.get(y)
    t, y_top = tsqr_hr_helper1.remote(u, s, y_blocked.objectids[0, 0], a.shape[1])
    r = tsqr_hr_helper2.remote(s, r_temp)
    return ray.get(y), ray.get(t), ray.get(y_top), ray.get(r)
Example #24
Source File: linalg.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def modified_lu(q):
    """Algorithm 5 from http://www.eecs.berkeley.edu/Pubs/TechRpts/2013/EECS-2013-175.pdf

    Takes a matrix q with orthonormal columns and returns l, u, s such that
    q - s = l * u.

    Arguments:
        q: a two-dimensional matrix with orthonormal columns.

    Return values:
        l: lower triangular.
        u: upper triangular.
        s: a diagonal matrix represented by its diagonal.
    """
    q = q.assemble()
    m, b = q.shape[0], q.shape[1]
    S = np.zeros(b)
    q_work = np.copy(q)
    for i in range(b):
        S[i] = -1 * np.sign(q_work[i, i])
        q_work[i, i] -= S[i]
        # Scale ith column of L by diagonal element.
        q_work[(i + 1):m, i] /= q_work[i, i]
        # Perform Schur complement update.
        q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i], q_work[i, (i + 1):b])
    L = np.tril(q_work)
    for i in range(b):
        L[i, i] = 1
    U = np.triu(q_work)[:b, :]
    return ray.get(numpy_to_dist.remote(ray.put(L))), U, S  # TODO(rkn): get rid of put
Example #25
Source File: alexnet.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def compute_mean_image(batches):
    """Computes the mean image given a list of batches of images.

    Args:
        batches (List[ObjectID]): A list of batches of images.

    Returns:
        ndarray: The mean image.
    """
    if len(batches) == 0:
        raise Exception("No images were passed into `compute_mean_image`.")
    sum_image_ids = [ra.sum.remote(batch, axis=0) for batch in batches]
    n_images = num_images.remote(batches)
    return np.sum(ray.get(sum_image_ids), axis=0).astype("float64") / ray.get(n_images)
Example #26
Source File: alexnet.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def num_images(batches):
    """Counts the number of images in a collection of batches.

    Args:
        batches (List): Collection of batches of images and labels.

    Returns:
        int: The number of images.
    """
    shape_ids = [ra.shape.remote(batch) for batch in batches]
    return sum([shape[0] for shape in ray.get(shape_ids)])
Example #27
Source File: hyperopt.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def cnn_setup(x, y, keep_prob, lr, stddev):
    first_hidden = 32
    second_hidden = 64
    fc_hidden = 1024
    W_conv1 = weight([5, 5, 1, first_hidden], stddev)
    B_conv1 = bias([first_hidden])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + B_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    W_conv2 = weight([5, 5, first_hidden, second_hidden], stddev)
    b_conv2 = bias([second_hidden])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    W_fc1 = weight([7 * 7 * second_hidden, fc_hidden], stddev)
    b_fc1 = bias([fc_hidden])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * second_hidden])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    W_fc2 = weight([fc_hidden, 10], stddev)
    b_fc2 = bias([10])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_conv), reduction_indices=[1]))
    correct_pred = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
    return (tf.train.AdamOptimizer(lr).minimize(cross_entropy),
            tf.reduce_mean(tf.cast(correct_pred, tf.float32)))

# Define a remote function that takes a set of hyperparameters as well as the
# data, constructs and trains a network, and returns the validation accuracy.
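The trailing comment refers to a function that is not included in this excerpt. What follows is a hypothetical sketch of such a remote function built on cnn_setup above; the name train_cnn, the fixed step count, and the feed-dict arguments are placeholders, not code from the original hyperopt.py:

@ray.remote
def train_cnn(stddev, lr, train_images, train_labels, val_images, val_labels):
    # Hypothetical sketch (TF1-style API, matching cnn_setup above): build the
    # graph, train briefly, and report validation accuracy for this setting.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    keep_prob = tf.placeholder(tf.float32)
    train_step, accuracy = cnn_setup(x, y, keep_prob, lr, stddev)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(100):  # the number of steps is an arbitrary placeholder
            sess.run(train_step,
                     feed_dict={x: train_images, y: train_labels, keep_prob: 0.5})
        return sess.run(accuracy,
                        feed_dict={x: val_images, y: val_labels, keep_prob: 1.0})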
Example #28
Source File: driver.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def full_loss(theta):
    theta_id = ray.put(theta)
    loss_ids = [loss.remote(theta_id, xs_id, ys_id) for (xs_id, ys_id) in batch_ids]
    return sum(ray.get(loss_ids))

# Compute the gradient of the loss on the entire dataset.
Example #29
Source File: driver.py From ray-legacy with BSD 3-Clause "New" or "Revised" License
def env_initializer():
    return gym.make("Pong-v0")

# Function for reinitializing the gym environment in order to guarantee that
# the state of the game is reset after each remote task.
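The trailing comment describes a reinitializer that is not shown in this excerpt. A plausible sketch, using the ray-legacy Reusable API that appears in Examples #15 through #18; the body of env_reinitializer is an assumption, not the original code:

def env_reinitializer(env):
    # Hypothetical: reset the game state so each remote task starts fresh.
    env.reset()
    return env

ray.reusables.env = ray.Reusable(env_initializer, env_reinitializer)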
Example #30
Source File: registry.py From rl_algorithms with MIT License
def build_ray_obj_from_cfg(
    cfg: ConfigDict, registry: Registry, default_args: dict = None
):
    """Build a module from a config dict.

    Args:
        cfg (:obj:`ConfigDict`): Config dict. It should at least contain the
            key "type".
        registry (:obj:`Registry`): The registry to search the type from.
        default_args (dict, optional): Default initialization arguments.

    Returns:
        obj: The constructed object.
    """
    assert isinstance(cfg, dict) and "type" in cfg
    assert isinstance(default_args, dict) or default_args is None
    args = cfg.copy()
    obj_type = args.pop("type")
    if isinstance(obj_type, str):
        obj_cls = registry.get(obj_type)
        if obj_cls is None:
            raise KeyError(
                "{} is not in the {} registry".format(obj_type, registry.name)
            )
    elif inspect.isclass(obj_type):
        obj_cls = obj_type
    else:
        raise TypeError(
            "type must be a str or valid type, but got {}".format(type(obj_type))
        )
    if default_args is not None:
        for name, value in default_args.items():
            args.setdefault(name, value)
    return ray.remote(num_cpus=1)(obj_cls).remote(**args)
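For illustration, a hedged usage sketch of this helper. The registered name "DQNLearner", the registry instance learner_registry, and the hyperparameter value are placeholders, not taken from rl_algorithms:

# Hypothetical config; only the "type" key is required by the helper.
cfg = {"type": "DQNLearner", "hidden_size": 256}
learner_actor = build_ray_obj_from_cfg(cfg, learner_registry)
# learner_actor is a Ray actor handle created via ray.remote(num_cpus=1);
# its methods are invoked with .remote(), e.g. learner_actor.update.remote(batch).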