Python cloudpickle.dumps() Examples

The following are 30 code examples of cloudpickle.dumps(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cloudpickle, or try the search function.
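Before the examples, a minimal round trip shows what cloudpickle.dumps() is for: it serializes Python objects that the standard library pickle rejects, such as lambdas and locally defined functions, into a bytes object that cloudpickle.loads() (or pickle.loads()) restores:

import cloudpickle

square = lambda x: x * x            # lambdas are not serializable with the stdlib pickle
data = cloudpickle.dumps(square)    # a bytes object
restored = cloudpickle.loads(data)
assert restored(4) == 16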
Example #1
Source File: test_meta_evaluator.py    From garage with MIT License
def test_pickle_meta_evaluator():
    set_seed(100)
    tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
    max_path_length = 200
    env = GarageEnv(PointEnv())
    n_traj = 3
    with tempfile.TemporaryDirectory() as log_dir_name:
        runner = LocalRunner(
            SnapshotConfig(snapshot_dir=log_dir_name,
                           snapshot_mode='last',
                           snapshot_gap=1))
        meta_eval = MetaEvaluator(test_task_sampler=tasks,
                                  max_path_length=max_path_length,
                                  n_test_tasks=10,
                                  n_exploration_traj=n_traj)
        policy = RandomPolicy(env.spec.action_space)
        algo = MockAlgo(env, policy, max_path_length, n_traj, meta_eval)
        runner.setup(algo, env)
        log_file = tempfile.NamedTemporaryFile()
        csv_output = CsvOutput(log_file.name)
        logger.add_output(csv_output)
        meta_eval.evaluate(algo)
        meta_eval_pickle = cloudpickle.dumps(meta_eval)
        meta_eval2 = cloudpickle.loads(meta_eval_pickle)
        meta_eval2.evaluate(algo) 
Example #2
Source File: parallel_history.py    From rltime with Apache License 2.0
def __init__(self, history_cls, history_args):
        super().__init__()

        self._last_needed_feed_count = 0
        self.results = {}
        self.pending_counts = {}

        # Make sure to use 'spawn' and not 'fork' to allow shared CUDA tensors
        # on Linux
        ctx = mp.get_context('spawn')
        self.close_event = ctx.Event()
        self.qevent = ctx.Event()
        # Queue for requests, such as getting training data
        self.request_queue = ctx.Queue(10)
        # Queue for updates like new acting samples and priority updates
        self.update_queue = ctx.Queue(10)
        # Queue for sending back request results
        self.result_queue = ctx.Queue()

        self._process = ctx.Process(
            target=self.run,
            args=(history_cls, cloudpickle.dumps(history_args)))

        self._process.start() 
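The run target is not part of this excerpt; on the worker side, a 'spawn' child would recover the history arguments by unpickling the byte string it was handed. A minimal sketch of that step (the method body below is an assumption, not rltime's actual code):

def run(self, history_cls, pickled_history_args):
    # Hypothetical worker entry point: 'spawn' children receive only
    # picklable arguments, so the factory args travel as cloudpickle bytes.
    import cloudpickle
    history_args = cloudpickle.loads(pickled_history_args)
    history = history_cls(**history_args)  # assuming a kwargs dict
    # ... serve self.request_queue / self.update_queue using `history` ...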
Example #3
Source File: launcher.py    From EPG with MIT License
def run_with_logger(thunk, logdir):
    from epg.launching import logger
    from mpi4py import MPI
    rank = MPI.COMM_WORLD.Get_rank()
    if rank == 0:
        os.makedirs(logdir, exist_ok=True)
    try:
        with logger.scoped_configure(dir=logdir, format_strs=None if rank == 0 else []):
            retval = thunk()
            if rank == 0:
                atomic_write(pickle.dumps(retval, protocol=-1), os.path.join(logdir, 'retval.pkl'))
            return retval
    except Exception as e:
        with open(os.path.join(logdir, "exception%i.txt" % rank), 'wt') as fh:
            fh.write(traceback.format_exc())
        raise e 
Example #4
Source File: test__task_commons.py    From tf-yarn with Apache License 2.0
def test__prepare_container():
    with contextlib.ExitStack() as stack:
        # mock modules
        mocked_client_call = stack.enter_context(
            patch(f"{MODULE_TO_TEST}.skein.ApplicationClient.from_current"))
        mocked_logs = stack.enter_context(patch(f'{MODULE_TO_TEST}._setup_container_logs'))
        mocked_cluster_spec = stack.enter_context(patch(f'{MODULE_TO_TEST}.cluster.start_cluster'))

        # fill client mock
        mocked_client = mock.MagicMock(spec=skein.ApplicationClient)
        host_port = ('localhost', 1234)
        instances = [('worker', 10), ('chief', 1)]
        mocked_client.kv.wait.return_value = json.dumps(instances).encode()
        mocked_client_call.return_value = mocked_client
        (client, cluster_spec, cluster_tasks) = _prepare_container(host_port)

        # checks
        mocked_logs.assert_called_once()
        mocked_cluster_spec.assert_called_once_with(host_port, mocked_client, cluster_tasks)
        assert client == mocked_client
        assert cluster_tasks == list(iter_tasks(instances)) 
Example #5
Source File: utils.py    From rltime with Apache License 2.0
def tcp_send_object(sock, obj, compress=False, pre_pickled=False):
    """Sends any python object over TCP using cloud-pickle with optional LZ4
    compression. Returns True if sent, False if connection closed"""
    data = cloudpickle.dumps(obj) if not pre_pickled else obj
    if compress:
        import lz4.frame
        data = lz4.frame.compress(data)

    # Send metadata to receiver: Size of the data buffer and whether
    # compression is enabled
    sock.send(struct.pack("II",len(data), 1 if compress else 0))
    sent = sock.send(data)
    if not sent:
        return False
    # Assumed either connection closed and sent=0, or the full thing was sent?
    # Maybe not if XFR stopped in the middle??
    assert(sent == len(data))
    return True 
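The wire format above is a fixed 8-byte header (struct "II": payload size and a compression flag) followed by the payload. A matching receiver could look like the sketch below; tcp_recv_object and recv_exact are hypothetical names, not part of rltime's excerpt:

import struct
import cloudpickle

def recv_exact(sock, size):
    # Read exactly `size` bytes; return None if the peer closes early.
    buf = b""
    while len(buf) < size:
        chunk = sock.recv(size - len(buf))
        if not chunk:
            return None
        buf += chunk
    return buf

def tcp_recv_object(sock):
    header = recv_exact(sock, struct.calcsize("II"))
    if header is None:
        return None
    size, compressed = struct.unpack("II", header)
    data = recv_exact(sock, size)
    if data is None:
        return None
    if compressed:
        import lz4.frame
        data = lz4.frame.decompress(data)
    return cloudpickle.loads(data)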
Example #6
Source File: client.py    From tf-yarn with Apache License 2.0
def get_safe_experiment_fn(full_fn_name: str, *args):
    """
    tf-yarn serializes the provided experiment function with cloudpickle.dumps.
    This is good for interactive experiments but can sometimes fail
    because the function is not serializable.
    You can use this wrapper function
    if you ship your experiment function (via conda, pex) manually to the workers.

    full_fn_name
        the name of the function (with the full path to its package and module),
        e.g. tf_yarn.my_module.my_experiment_fn

    args
        arguments to be provided to this function

    """
    module_name, fn_name = full_fn_name.rsplit('.', 1)
    module = importlib.import_module(module_name)
    experiment_fn = getattr(module, fn_name)
    return partial(experiment_fn, *args) 
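A call site then passes the dotted path instead of the function object; the names below are placeholders:

# Only the path string and the arguments need to be serializable; the
# function is resolved by import on the worker, where the package is installed.
experiment_fn = get_safe_experiment_fn("my_package.my_module.my_experiment_fn", "/path/to/data")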
Example #7
Source File: client.py    From tf-yarn with Apache License 2.0
def _run_on_cluster(
    experiment_fn: ExperimentFn,
    skein_cluster: SkeinCluster,
    eval_monitor_log_thresholds: Dict[str, Tuple[float, float]] = None,
    n_try: int = 0
) -> Optional[metrics.Metrics]:
    def _new_experiment_fn():
        return _add_monitor_to_experiment(experiment_fn())
    new_experiment_fn = _new_experiment_fn

    # Attempt serialization early to avoid allocating unnecessary resources
    serialized_fn = cloudpickle.dumps(new_experiment_fn)
    with skein_cluster.client:
        return _execute_and_await_termination(
            skein_cluster,
            serialized_fn,
            eval_monitor_log_thresholds,
            n_try=n_try
        ) 
Example #8
Source File: OptiGenAlgNsga2Deap.py    From pyleecan with Apache License 2.0
def as_dict(self):
        """Convert this objet in a json seriable dict (can be use in __init__)
        """

        # Get the properties inherited from OptiGenAlg
        OptiGenAlgNsga2Deap_dict = super(OptiGenAlgNsga2Deap, self).as_dict()
        if self.toolbox is None:
            OptiGenAlgNsga2Deap_dict["toolbox"] = None
        else:  # Store the data serialized with cloudpickle, as a str readable from JSON save files
            OptiGenAlgNsga2Deap_dict["toolbox"] = {
                "__class__": str(type(self._toolbox)),
                "__repr__": str(self._toolbox.__repr__()),
                "serialized": dumps(self._toolbox).decode("ISO-8859-2"),
            }
        # The class name is added to the dict for deserialization purposes
        # Overwrite the mother class name
        OptiGenAlgNsga2Deap_dict["__class__"] = "OptiGenAlgNsga2Deap"
        return OptiGenAlgNsga2Deap_dict 
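The "ISO-8859-2" decode exists only to turn the pickled bytes into a str that can be embedded in a JSON document; deserialization reverses it by encoding back to bytes before unpickling. A minimal round trip of that trick (toolbox here is a placeholder object, not pyleecan's real toolbox):

import cloudpickle

toolbox = {"weights": [1, 2, 3]}                            # placeholder object
as_text = cloudpickle.dumps(toolbox).decode("ISO-8859-2")   # JSON-safe str
restored = cloudpickle.loads(as_text.encode("ISO-8859-2"))  # back to the object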
Example #9
Source File: cloudpickle.py    From pywren-ibm-cloud with Apache License 2.0
def dumps(obj, protocol=None):
    """Serialize obj as a string of bytes allocated in memory

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
    between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python.
    """
    file = StringIO()
    try:
        cp = CloudPickler(file, protocol=protocol)
        cp.dump(obj)
        return file.getvalue()
    finally:
        file.close()


# including pickles unloading functions in this namespace 
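The practical upshot of that docstring is a single argument choice (a minimal sketch, independent of the pywren-ibm-cloud code above):

import pickle
import cloudpickle

obj = {"answer": 42}
fast = cloudpickle.dumps(obj)  # defaults to the highest protocol: best between same-version peers
portable = cloudpickle.dumps(obj, protocol=pickle.DEFAULT_PROTOCOL)  # safer across Python versions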
Example #10
Source File: gym.py    From sonic_contest with MIT License
def __init__(self, make_env, observation_space):
        self.observation_space = observation_space
        if isinstance(observation_space, gym.spaces.Box):
            num_elems = len(np.array(observation_space.low).flatten())
            zeros = [0] * num_elems
            self._obs_buf = Array('b', zeros)
        else:
            self._obs_buf = None
        self._pipe, other_end = Pipe()
        self._proc = Process(target=self._worker,
                             args=(other_end,
                                   self._obs_buf,
                                   cloudpickle.dumps(make_env)),
                             daemon=True)
        self._proc.start()
        self._running_cmd = None
        other_end.close()
        self._pipe.send(('action_space', None))
        self.action_space = self._get_response() 
Example #11
Source File: subprocess_env_manager.py    From RLs with Apache License 2.0
def create_worker(
        worker_id: int,
        step_queue: Queue,
        env_factory: Callable[[int, List[SideChannel]], BaseEnv],
        engine_configuration: EngineConfig,
    ) -> UnityEnvWorker:
        parent_conn, child_conn = Pipe()

        # Need to use cloudpickle for the env factory function since function objects aren't picklable
        # on Windows as of Python 3.6.
        pickled_env_factory = cloudpickle.dumps(env_factory)
        child_process = Process(
            target=worker,
            args=(
                child_conn,
                step_queue,
                pickled_env_factory,
                worker_id,
                engine_configuration,
                logger.level,
            ),
        )
        child_process.start()
        return UnityEnvWorker(child_process, worker_id, parent_conn) 
Example #12
Source File: subproc_env.py    From Actor-Critic-Based-Resource-Allocation-for-Multimodal-Optical-Networks with GNU General Public License v3.0
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
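Examples #12-#16, #18, #19, and #24-#27 all show the same idiom: a CloudpickleWrapper whose __getstate__ hands multiprocessing a cloudpickle byte string, because multiprocessing itself serializes with the stdlib pickle, which fails on lambdas and closures. For context, the full wrapper class typically looks like this (a representative sketch of the common baselines-style wrapper, not copied from any single project above):

class CloudpickleWrapper(object):
    """Wraps a callable (usually an env-creating closure) so that
    multiprocessing can pickle it via cloudpickle."""

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        import pickle
        self.x = pickle.loads(ob)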
Example #13
Source File: parallel_runner.py    From pymarl with Apache License 2.0
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #14
Source File: __init__.py    From sonic_contest with MIT License
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #15
Source File: __init__.py    From pytorch-pommerman-rl with MIT License
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #16
Source File: multiprocessing_env.py    From rl_algorithms with MIT License
def __getstate__(self):
        import cloudpickle

        return cloudpickle.dumps(self.x) 
Example #17
Source File: context.py    From daskos with Apache License 2.0
def _save(self, task, value):
        return self.zk.create(self.path(task=task), cloudpickle.dumps(value),
                              makepath=True) 
Example #18
Source File: multiprocessing_env.py    From ppo-pytorch with MIT License
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #19
Source File: __init__.py    From MOREL with MIT License
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #20
Source File: runtime.py    From rekall with Apache License 2.0
def __init__(self, fn, num_workers, initializer=None):
        """Initializes the instance.

        Args:
            fn: The function to run in child processes.
            num_workers: Number of child processes to create.
            initializer: A function to run in the child process after it is 
                created. It can be used to set up necessary resources in the
                worker.
        """
        self._pool = mp.get_context("spawn").Pool(
                processes=num_workers,
                initializer=initializer)
        self._pickled_fn = cloudpickle.dumps(fn) 
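The pool workers would then unpickle the function before applying it; a hypothetical module-level trampoline for that dispatch (not rekall's actual code):

import cloudpickle

def _run_pickled(pickled_fn, *args):
    # Must live at module level so the 'spawn' pool can pickle it;
    # the real work function travels alongside as cloudpickle bytes.
    fn = cloudpickle.loads(pickled_fn)
    return fn(*args)

# e.g., inside the class:
#     self._pool.apply_async(_run_pickled, (self._pickled_fn, task))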
Example #21
Source File: launcher.py    From EPG with MIT License
def write_metadata(dir, args, kwargs):
    with open(os.path.join(dir, 'metadata.json'), 'wt') as fh:
        fh.write(json.dumps(dict(args=args, kwargs=kwargs))) 
Example #22
Source File: sub_proc.py    From rltime with Apache License 2.0
def __init__(self, env_fns, spaces=None, context='spawn'):
        """
        Arguments:

        env_fns: iterable of callables - functions that create the environments
            to run in subprocesses; they need to be cloud-pickleable.
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        ctx = mp.get_context(context)
        self.remotes, self.work_remotes = \
            zip(*[ctx.Pipe() for _ in range(nenvs)])
        self.ps = [
            ctx.Process(
                target=worker,
                args=(work_remote, remote, cloudpickle.dumps(env_fn)))
            for (work_remote, remote, env_fn) in zip(
                self.work_remotes, self.remotes, env_fns)
        ]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            with clear_mpi_env_vars():
                p.start()
        for remote in self.work_remotes:
            remote.close()

        self.remotes[0].send(('get_spaces_spec', None))
        observation_space, action_space, self.spec = self.remotes[0].recv()
        self.viewer = None
        VecEnv.__init__(self, len(env_fns), observation_space, action_space) 
Example #23
Source File: actor_pool.py    From rltime with Apache License 2.0
def set_actor_policy(self, actor_policy):
        # Save the source policy and make the policy creator for the actors
        self._source_actor_policy = actor_policy
        policy_creator = actor_policy.get_creator()
        if not self._receivers:
            # No receivers, just kick-off acting directly
            self._start_get_samples(policy_creator)
        else:
            # Send the policy creator to each receiver so it can start acting
            obj = cloudpickle.dumps(policy_creator)
            for state_queue in self._state_queues:
                state_queue.put(obj) 
Example #24
Source File: tf_subproc.py    From ape-x with Apache License 2.0
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #25
Source File: __init__.py    From DRL_DeliveryDuel with MIT License
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #26
Source File: __init__.py    From ICML2019-TREX with MIT License
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #27
Source File: __init__.py    From ICML2019-TREX with MIT License
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Example #28
Source File: hunch_publisher.py    From Hunch with Apache License 2.0
def create_model_blob_with_custom_setup(self, model_blob, custom_package_name, custom_package_version,
                                            custom_package_path):
        return cloudpickle.dumps(
            self._create_model_blob_details_with_custom_setup(
                model_blob, custom_package_name, custom_package_version, custom_package_path)) 
Example #29
Source File: hunch_publisher.py    From Hunch with Apache License 2.0
def publish_asm_model(self, path_to_prediction_module, path_to_model_resources_dir, model_id, model_version, path_to_setup_py=None, custom_package_name=None):
        self._prediction_module_guardrails_breach(path_to_prediction_module, path_to_model_resources_dir, path_to_setup_py)
        model_blob, model_meta = self._create_model_blob(path_to_prediction_module, path_to_model_resources_dir, path_to_setup_py, custom_package_name)
        self.write_model_to_blob_storage(cloudpickle.dumps(model_blob), model_id, model_version) 
Example #30
Source File: hunch_publisher.py    From Hunch with Apache License 2.0
def publish_model(self, model_instance, model_id, model_version, path_to_setup_py=None, custom_package_name=None):
        """Serialize a model instance with cloudpickle and publish it to blob storage.

        Args:
            model_instance: The model object to serialize and publish.
            model_id: Identifier under which the model is stored.
            model_version: Version of the published model.
            path_to_setup_py: Optional path to a setup.py describing a custom package.
            custom_package_name: Optional name of that custom package.
        """
        model_blob, model_meta = self._create_model_blob(None, None, path_to_setup_py, custom_package_name, cloudpickle.dumps(model_instance))
        self.write_model_to_blob_storage(cloudpickle.dumps(model_blob), model_id, model_version)