Python mpi4py.MPI.ANY_SOURCE Examples

The following are 26 code examples of mpi4py.MPI.ANY_SOURCE, drawn from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the mpi4py.MPI module.
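MPI.ANY_SOURCE is a wildcard constant for the source argument of receive calls: the receive matches a message from whichever rank sends first, and the actual sender can then be read from an MPI.Status object. A minimal, self-contained sketch of the pattern (not taken from any of the projects below):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    # Accept one message from each worker, in whatever order they arrive.
    status = MPI.Status()
    for _ in range(comm.Get_size() - 1):
        msg = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        print("rank 0 received %r from rank %d" % (msg, status.Get_source()))
else:
    # Every other rank sends a single message to rank 0.
    comm.send("hello from rank %d" % rank, dest=0, tag=0)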
Example #1
Source File: eq_loc.py    From pykonal with GNU General Public License v3.0
def id_distribution_loop(argc, cfg):
    """
    A loop to distribute event IDs to hungry workers.
    """

    arrivals = load_arrivals(argc.arrivals_file)
    event_ids = arrivals["event_id"].unique()
    # Distribute event IDs.
    for idx in range(len(event_ids)):
        event_id = event_ids[idx]
        requesting_rank = COMM.recv(source=MPI.ANY_SOURCE, tag=ID_REQUEST_TAG)
        COMM.send(event_id, dest=requesting_rank, tag=ID_TRANSMISSION_TAG)
    # Distribute sentinel.
    for irank in range(WORLD_SIZE - 1):
        requesting_rank = COMM.recv(source=MPI.ANY_SOURCE, tag=ID_REQUEST_TAG)
        COMM.send(None, dest=requesting_rank, tag=ID_TRANSMISSION_TAG)

    return True
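The matching worker loop is not part of the excerpt; a hedged sketch of what it could look like, reusing COMM, ID_REQUEST_TAG and ID_TRANSMISSION_TAG from the example above (RANK, ROOT_RANK and locate_event are assumptions, not pykonal code):

def event_location_loop(argc, cfg):
    """
    Hypothetical worker counterpart: request event IDs from the
    distributor until the sentinel (None) arrives.
    """
    while True:
        # Announce readiness by sending this rank's number.
        COMM.send(RANK, dest=ROOT_RANK, tag=ID_REQUEST_TAG)
        event_id = COMM.recv(source=ROOT_RANK, tag=ID_TRANSMISSION_TAG)
        if event_id is None:
            break  # sentinel: no more events to process
        locate_event(event_id, argc, cfg)  # hypothetical helper
    return True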
Example #2
Source File: mpiserve.py    From GridCal with GNU General Public License v3.0
def _handle_message(self):
        """Handle received messages.

        Receive record update messages of the form
            ('action', record_id, params)
        where 'action' is the name of an EvalRecord method and params is
        the list of parameters.  The record_id should be recorded in the
        hub's records table (which happens whenever it is referenced in
        a message sent to a worker).

        On a message indicating that the worker is done with the record,
        we add the worker that sent the message back to the free pool.
        """
        logger.debug("Handle incoming message")
        s = MPI.Status()
        data = comm.recv(status=s, source=MPI.ANY_SOURCE, tag=0)
        logger.debug("Received message: %s", data)
        mname = data[0]
        record = self._recids[data[1]]
        method = getattr(record, mname)
        method(*data[2:])
        if mname == 'complete' or mname == 'cancel' or mname == 'kill':
            logger.debug("Re-queueing worker")
            self._workers.append(s.source)
        self.ping() 
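For context, the worker end of this protocol would send the ('action', record_id, params) tuples the docstring describes; a hedged sketch (the hub rank and the 'complete' payload are assumptions, not taken from GridCal):

# Hypothetical worker-side update: mark a record as complete on tag 0.
# record_id must be one the hub has already referenced in a message to
# this worker; the objective value here is made up.
record_id = 7
fx = 0.125
comm.send(('complete', record_id, fx), dest=0, tag=0)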
Example #3
Source File: easgd_server.py    From Theano-MPI with Educational Community License v2.0
def run(self, model):
        
        if self.comm is None:
            
            print('Server communicator not initialized')
            
            return
            
        print('server started')

        while True:
            #  Wait for next request from client
            
            request = self.comm.recv(source=MPI.ANY_SOURCE, tag=199)
                
            #  Do some process work and formulate a reply
            reply = self.process_request(model, request['id'],
                                        request['rank'],request['message'])

            #  Send reply back to client
            self.comm.send(reply, dest=request['rank'], tag=200)
            
            # Do some action work after reply
            self.action_after(model, request['id'],
                                request['rank'], request['message']) 
Example #4
Source File: Interaction.py    From pilot with Apache License 2.0
def __init__(self, rank=None, nonMPIMode=False, logger=None):
        if nonMPIMode:
            self.comm = None
            self.stat = None
            self.nRank = 0
            self.totalRanks = 1
            self.selectSource = None
        else:
            from mpi4py import MPI
            self.comm = MPI.COMM_WORLD
            self.stat = MPI.Status()
            self.nRank = self.comm.Get_rank()
            self.totalRanks = self.comm.Get_size()
            self.selectSource = MPI.ANY_SOURCE

        self.logger = logger

        # for message in rank 0
        self.hasMessage = False
        self.recvQueue = recvQueue
        self.sendQueue = sendQueue


    # get rank of itself 
Example #5
Source File: mpi.py    From oggm with BSD 3-Clause "New" or "Revised" License
def mpi_master_spin_tasks(task, gdirs):
    comm = OGGM_MPI_COMM
    cfg_store = cfg.pack_config()
    msg_list = ([gdir for gdir in gdirs if gdir is not None] +
                [None] * OGGM_MPI_SIZE)

    _imprint("Starting MPI task distribution...")

    comm.bcast((cfg_store, task), root=OGGM_MPI_ROOT)

    status = MPI.Status()
    for msg in msg_list:
        comm.recv(source=MPI.ANY_SOURCE, status=status)
        comm.send(obj=msg, dest=status.Get_source())

    _imprint("MPI task distribution done, collecting results...")

    comm.gather(sendobj=None, root=OGGM_MPI_ROOT)

    _imprint("MPI task results gotten!") 
Example #6
Source File: mpi_process.py    From dispel4py with Apache License 2.0
def _read(self):
        result = super(MPIWrapper, self)._read()
        if result is not None:
            return result

        status = MPI.Status()
        msg = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        tag = status.Get_tag()
        while tag == STATUS_TERMINATED:
            self.terminated += 1
            if self.terminated >= self._num_sources:
                break
            else:
                msg = comm.recv(source=MPI.ANY_SOURCE,
                                tag=MPI.ANY_TAG,
                                status=status)
                tag = status.Get_tag()
        return msg, tag 
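The sending side of this termination handshake is not shown; presumably each source process signals that it is done by sending one message with the STATUS_TERMINATED tag to every consumer, roughly as in this hedged sketch (consumer_ranks is a placeholder):

# Hypothetical producer-side shutdown: one STATUS_TERMINATED message per
# consumer, so each consumer can count how many of its sources are done.
for dest_rank in consumer_ranks:
    comm.send(None, dest=dest_rank, tag=STATUS_TERMINATED)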
Example #7
Source File: plearn.py    From tribeflow with BSD 3-Clause "New" or "Revised" License
def manage(comm, num_workers):
    available_to_pair = -1
    finished = {}
    num_finished = 0
    
    for worker_id in range(1, num_workers + 1):
        finished[worker_id] = False

    while num_finished != num_workers:
        status = MPI.Status()
        worker_id = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, \
                status=status)
        event = status.Get_tag()

        if event == Msg.STARTED.value:
            print('Worker', worker_id, 'is working!')
        
        elif event == Msg.PAIRME.value:
            if num_finished == num_workers - 1: #only 1 working, pair with self
                comm.isend(worker_id, dest=worker_id, tag=Msg.PAIRED.value)
            else:
                assert available_to_pair != worker_id
                if available_to_pair == -1:
                    available_to_pair = worker_id
                else:
                    comm.isend(available_to_pair, dest=worker_id, \
                            tag=Msg.PAIRED.value)
                    comm.isend(worker_id, dest=available_to_pair, \
                            tag=Msg.PAIRED.value)
                    available_to_pair = -1
        elif event == Msg.FINISHED.value:
            print('Worker', worker_id, 'has finished its iterations!')
            finished[worker_id] = True
            num_finished += 1
            
            #wake up last worker if it's waiting for a pair
            if num_finished == num_workers - 1 and available_to_pair != -1:
                comm.isend(available_to_pair, dest=available_to_pair, \
                        tag=Msg.PAIRED.value)
        else:
            print(0, 'Unknown message received', worker_id, event, Msg(event)) 
Example #8
Source File: mpi.py    From westpa with MIT License
def _receive_loop(self):
        comm = self.comm 

        while True:

            status = MPI.Status()
            comm.Iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status)
            message_src = status.Get_source()
            message_tag = status.Get_tag()

            # results are tuples of (task_id, {'result', 'exception'}, value)
            if message_tag == self.result_tag:
                (task_id, result_stat, result_value) = comm.recv(source = message_src, tag = message_tag)

                ft = self.pending_futures.pop(task_id)

                if result_stat == 'exception':
                    ft._set_exception(*result_value)
# Check with Matt on what else to do for an exception
                else:
                    ft._set_result(result_value)
                    self.task_dest.append(message_src)

            # Check for announcements
            elif message_tag == self.announce_tag:
                messages = comm.recv(source = message_src, tag = message_tag)
                if 'shutdown' in messages:
                    log.debug('exiting _receive_loop()')
                    return 
Example #9
Source File: exchanger.py    From Theano-MPI with Educational Community License v2.0
def push_message(self, dest_rank, count_arr, recorder=None):
        
        '''
        push message:
        push params_i and alpha_i to the chosen rank
        '''
        
        # detect if any other worker is pushing to self at the same time to prevent deadlock
        while self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=700): 
            if self.test: print('a potential deadlock prevented')
            self.process_messages(count_arr, recorder)
            
        if recorder: recorder.start()

        # 0. blocking request
        
        if self.test: print('%d pushing msg to %d'  % (self.rank,dest_rank))
        
        self.comm.Send(buf=count_arr,dest=dest_rank, tag=700)  
        
        if self.test: print('%d requested to %d'  % (self.rank,dest_rank))
        
        # 1. push
        
        self.gpucomm, dest_gpurank, self_gpurank = self.get_gpucomm_with(dest_rank)
        
        self._push_params(self_gpurank, dest_rank)
        
        if self.test: print('%d msg pushed'  % self.rank)
        
        if recorder: recorder.end('comm') 
Example #10
Source File: exchanger.py    From Theano-MPI with Educational Community License v2.0
def process_messages(self, count_arr, recorder=None):
        
        if recorder: recorder.start()
        
        status = MPI.Status()
        
        s = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=700, status=status)
        
        # if self.test: print '%d probed, got %s' % (self.rank,s)
        
        while s:
            
            src_rank=status.source
            
            self.comm.Recv(buf=count_arr, source=src_rank, tag=700, status=status)
            
            self.gpucomm, src_gpurank, self_gpurank = self.get_gpucomm_with(src_rank)
            
            if self.test: print('%d merging with %d' % (self.rank, src_rank))
            
            self._merge_params_from(src_gpurank, src_rank)
            
            s = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=700, status=status)
            
            if self.test: print('%d probed again, got %s' % (self.rank,s))
            
        if recorder: recorder.end('comm') 
Example #11
Source File: mpi.py    From deepdish with BSD 3-Clause "New" or "Revised" License
def imap(f, workloads, star=False):
    global _g_available_workers, _g_initialized
    from mpi4py import MPI
    N = MPI.COMM_WORLD.Get_size() - 1
    if N == 0 or not _g_initialized:
        mapf = [map, itr.starmap][star]
        for res in mapf(f, workloads):
            yield res
        return

    results = []
    indices = []

    for job_index, workload in enumerate(itr.chain(workloads, itr.repeat(None))):
        if workload is None and len(_g_available_workers) == N:
            break

        while not _g_available_workers or workload is None:
            # Wait to receive results
            status = MPI.Status()
            ret = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            if status.tag == 2:
                results.append(ret['output_data'])
                indices.append(ret['job_index'])
                _g_available_workers.add(status.source)
                if len(_g_available_workers) == N:
                    break

        if _g_available_workers and workload is not None:
            dest_rank = _g_available_workers.pop()

            # Send off job
            task = dict(func=f, input_data=workload, job_index=job_index, unpack=star)
            MPI.COMM_WORLD.send(task, dest=dest_rank, tag=10)

    II = np.argsort(indices)  
    for i in II:
        yield results[i] 
Example #12
Source File: mpi.py    From deepdish with BSD 3-Clause "New" or "Revised" License
def imap_unordered(f, workloads, star=False):
    global _g_available_workers, _g_initialized

    from mpi4py import MPI
    N = MPI.COMM_WORLD.Get_size() - 1
    if N == 0 or not _g_initialized:
        mapf = [map, itr.starmap][star]
        for res in mapf(f, workloads):
            yield res
        return

    for job_index, workload in enumerate(itr.chain(workloads, itr.repeat(None))):
        if workload is None and len(_g_available_workers) == N:
            break

        while not _g_available_workers or workload is None:
            # Wait to receive results
            status = MPI.Status()
            ret = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            if status.tag == 2:
                yield ret['output_data']
                _g_available_workers.add(status.source)
                if len(_g_available_workers) == N:
                    break

        if _g_available_workers and workload is not None:
            dest_rank = _g_available_workers.pop()

            # Send off job
            task = dict(func=f, input_data=workload, job_index=job_index, unpack=star)
            MPI.COMM_WORLD.send(task, dest=dest_rank, tag=10) 
Example #13
Source File: mpi_worker_pool.py    From parsl with Apache License 2.0
def recv_task_request_from_workers(self):
        """ Receives 1 task request from MPI comm

        Returns:
        --------
            worker_rank: rank of the requesting worker
        """
        info = MPI.Status()
        comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
        worker_rank = info.Get_source()
        logger.info("Received task request from worker:{}".format(worker_rank))
        return worker_rank 
Example #14
Source File: mpi_worker_pool.py    From parsl with Apache License 2.0
def recv_result_from_workers(self):
        """ Receives a results from the MPI worker pool and send it out via 0mq

        Returns:
        --------
            result: task result from the workers
        """
        info = MPI.Status()
        result = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=info)
        logger.debug("Received result from workers: {}".format(result))
        return result 
Example #15
Source File: parallel_archipelago.py    From bingo with Apache License 2.0
def _gather_updated_ages(self, total_age):
        total_age.update({0: self._island.generational_age})
        status = MPI.Status()
        while self.comm.iprobe(source=MPI.ANY_SOURCE,
                               tag=AGE_UPDATE,
                               status=status):
            data = self.comm.recv(source=status.Get_source(),
                                  tag=AGE_UPDATE)
            total_age.update(data) 
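The sender of these AGE_UPDATE messages is outside the excerpt; presumably each island rank reports its own generational age to the master rank, along the lines of this hedged sketch (the master rank 0 is an assumption):

# Hypothetical island-side report: send {rank: age} so the master can
# merge it into total_age in _gather_updated_ages().
self.comm.send({self.comm.Get_rank(): self._island.generational_age},
               dest=0, tag=AGE_UPDATE)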
Example #16
Source File: process.py    From mpi_learn with GNU General Public License v3.0
def recv_any_from_child(self,status):
        """Receives any message from any child.  Returns the provided status object,
            populated with information about received message"""
        self.recv( tag='any', source=MPI.ANY_SOURCE, status=status, comm=self.child_comm )
        return status 
Example #17
Source File: mpi.py    From oggm with BSD 3-Clause "New" or "Revised" License
def _shutdown_slaves():
    global OGGM_MPI_COMM
    if OGGM_MPI_COMM is not None and OGGM_MPI_COMM != MPI.COMM_NULL:
        msgs = [StopIteration] * OGGM_MPI_SIZE
        status = MPI.Status()
        OGGM_MPI_COMM.bcast((None, None), root=OGGM_MPI_ROOT)
        for msg in msgs:
            OGGM_MPI_COMM.recv(source=MPI.ANY_SOURCE, status=status)
            OGGM_MPI_COMM.send(obj=msg, dest=status.Get_source())
        OGGM_MPI_COMM.gather(sendobj=None, root=OGGM_MPI_ROOT)
    OGGM_MPI_COMM = None 
Example #18
Source File: mpi_queue_process.py    From dispel4py with Apache License 2.0
def receive(wrapper):
    while wrapper.terminated < wrapper._num_sources:
        status = MPI.Status()
        msg = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        tag = status.Get_tag()
        # print('Received %s, %s' % (msg, tag))
        if tag == STATUS_TERMINATED:
            wrapper.terminated += 1
        else:
            wrapper.input_data.put((msg, tag))
        # self.wrapper.pe.log('Queue size: %s'%self.wrapper.input_data.qsize())
    # put the final terminate block into the queue
    wrapper.input_data.put((None, STATUS_TERMINATED)) 
Example #19
Source File: communication.py    From deep500 with BSD 3-Clause "New" or "Revised" License
def wait_for_any_rank(self):
        """
        Waits for any message from any rank.
        @return A 3-tuple of (tensor, source rank, tag)
        """
        status = MPI.Status()
        tensor = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        return tensor, status.source, status.tag 
Example #20
Source File: communication.py    From heat with MIT License
def Recv(self, buf, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=None):
        if isinstance(buf, dndarray.DNDarray):
            buf = buf._DNDarray__array
        if not isinstance(buf, torch.Tensor):
            return self.handle.Recv(buf, source, tag, status)

        rbuf = buf if CUDA_AWARE_MPI else buf.cpu()
        ret = self.handle.Recv(self.as_buffer(rbuf), source, tag, status)

        if isinstance(buf, torch.Tensor) and buf.is_cuda and not CUDA_AWARE_MPI:
            buf.copy_(rbuf)
        return ret 
Example #21
Source File: communication.py    From heat with MIT License
def Irecv(self, buf, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG):
        if isinstance(buf, dndarray.DNDarray):
            buf = buf._DNDarray__array
        if not isinstance(buf, torch.Tensor):
            return MPIRequest(self.handle.Irecv(buf, source, tag))

        rbuf = buf if CUDA_AWARE_MPI else buf.cpu()
        return MPIRequest(self.handle.Irecv(self.as_buffer(rbuf), source, tag), None, rbuf, buf) 
Example #22
Source File: mpi_load_balancer.py    From pyscf with Apache License 2.0
def master(self):
        status = MPI.Status()
        work = []
        for index in range(0,self.nindices):
            work.append(range(len(self.outblocks[index])))
        block_indices = pyscf.lib.cartesian_prod(work)
        nwork = len(block_indices)
        iwork = 0
        iwork_received = 0
        working_procs=[]
        for i in range(1,self.size):
            data = 0
            tag = tags.KILL
            if iwork < nwork:
                data = block_indices[iwork]
                tag = tags.WORK
                working_procs.append(i)
#            print("MASTER : sending out msg to processor ", i, data)
            self.COMM.isend(obj=data, dest=i, tag=tag)
            iwork += 1

        for i in range(iwork,nwork+1):
            msg = self.COMM.Probe(MPI.ANY_SOURCE, tag=tags.WORK_DONE, status=status)
            received_from = status.Get_source()
            data = self.COMM.recv(source=received_from, tag=tags.WORK_DONE)
            iwork_received += 1
#            print("MASTER : just received work_done from processor ", received_from, " (",iwork_received,"/",nwork,")")
            if i == nwork:
##                print("MASTER : returning...")
                break

            data = block_indices[i]
            tag = tags.WORK
#            print("MASTER : sending out new work to processor ", recieved_from, data)
            self.COMM.isend(obj=data, dest=recieved_from, tag=tag)

        for i in range(iwork_received,nwork):
#            print("waiting on work...")
            msg = self.COMM.Probe(MPI.ANY_SOURCE, tag=tags.WORK_DONE, status=status)
            received_from = status.Get_source()
            data = self.COMM.recv(source=received_from, tag=tags.WORK_DONE)

        # You only have to send a kill to the processors that actually did work, otherwise they were sent a kill
        # at the very first loop
        for i in working_procs:
            data = 0
            tag = tags.KILL
#            print("MASTER (ALL_WORK_DONE): sending kill out to rank ", i, data)
            self.COMM.isend(obj=data, dest=i, tag=tag)

        return 
Example #23
Source File: mpi.py    From abcpy with BSD 3-Clause Clear License
def orchestrate_map(self,pds_id):
        """Orchestrates the teams to perform a map function
        
        This works by keeping track of the teams who haven't finished executing,
        waiting for them to request the next chunk of data when they are free,
        responding to them with the data and then sending them a Sentinel
        signalling that they can exit.
        """
        is_map_done = [True if i in self.mpimanager.get_scheduler_node_ranks() else False for i in range(self.mpimanager.get_scheduler_size())]
        status = MPI.Status()

        #Copy it to the pending. This is so when scheduler accesses
        #the PDS data it's not empty.
        self.pds_pending_store[pds_id] = list(self.pds_store[pds_id])

        #While we have some ranks that haven't finished
        while sum(is_map_done)<self.mpimanager.get_scheduler_size():
            #Wait for a request from anyone
            data_request = self.mpimanager.get_scheduler_communicator().recv(
                source=MPI.ANY_SOURCE,
                tag=MPI.ANY_TAG,
                status=status,
            )
            request_from_rank = status.source

            if data_request!=pds_id:
                print("Ignoring stale PDS data request from",
                    request_from_rank,":",data_request,"/",pds_id)
                continue

            #Pointer so we don't have to keep doing dict lookups
            current_pds_items = self.pds_pending_store[pds_id]
            num_current_pds_items = len(current_pds_items)

            #Everyone's already exhausted all the data.
            # Send a sentinel and mark the node as finished
            if num_current_pds_items == 0:
                self.mpimanager.get_scheduler_communicator().send(None, dest=request_from_rank, tag=pds_id)
                is_map_done[request_from_rank] = True
            else:
                #Create the chunk of data to send. Pop off items and tag them with an id.
                # so we can sort them later
                chunk_to_send = []
                for i in range(self.chunk_size):
                    chunk_to_send+=[(num_current_pds_items-i,current_pds_items.pop())]
                    self.mpimanager.get_scheduler_communicator().send(chunk_to_send, dest=request_from_rank, tag=pds_id) 
Example #24
Source File: mpi_pool.py    From astroABC with MIT License
def map(self, function, jobs):
                '''
                Map function analogous to multiprocessing's Pool.map()
                Input:
                function  - function to be mapped
                jobs - array of jobs to be assigned to each proc

                '''
                njobs = len(jobs)
                self.function = function


                # If not the master just wait for instructions.
                if not self.rank == 0:
                        self.worker()
                        return

                F = _func_wrapper(function)
                req = [self.comm.isend(F, dest=i) for i in range(1,self.size)]
                MPI.Request.waitall(req)

                if  (njobs <= self.size-1):
                        requests = []
                        for i, job in enumerate(jobs):
                                worker_id = i % self.size + 1
                                req = self.comm.isend(job, dest=worker_id, tag=i)
                                requests.append(req)

                        MPI.Request.waitall(requests)

                        results = []
                        for i in range(njobs):
                                worker_id = i % self.size + 1
                                result = self.comm.recv(source=worker_id, tag=i)
                                results.append(result)
                        return results

                else:
                        for i in range(1, self.size):
                                req = self.comm.isend(jobs[i-1], dest=i, tag=i)

                        njobs_done = self.size-1 # don't use master for function
                        results = [None]*njobs
                        for job in range(njobs):
                                status = MPI.Status()
                                result = self.comm.recv(source=MPI.ANY_SOURCE,tag=MPI.ANY_TAG, status=status)
                                worker_id = status.source
                                results[job] = result

                                if njobs_done < njobs:
                                        job = jobs[njobs_done]
                                        i = njobs_done
                                        self.comm.isend(job, dest=worker_id, tag=i)
                                        njobs_done += 1


                        return results 
Example #25
Source File: pt_toy_example.py    From beat with GNU General Public License v3.0
def master_process(comm, size, tags, status):

    num_workers = size - 1
    tasks = range(num_workers)
    chain = []
    active_workers = 0
    # start sampling of chains with given seed
    print("Master starting with %d workers" % num_workers)
    for i in range(num_workers):
        comm.recv(source=MPI.ANY_SOURCE, tag=tags.READY, status=status)
        source = status.Get_source()
        comm.send(tasks[i], dest=source, tag=tags.START)
        print("Sent task to worker %i" % source)
        active_workers += 1

    print("Parallel tempering ...")
    print("----------------------")

    while True:
        m1 = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source1 = status.Get_source()
        print("Got sample 1 from worker %i" % source1)
        m2 = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source2 = status.Get_source()
        print("Got sample 2 from worker %i" % source1)

        m1, m2 = metrop_select(m1, m2)
        print('samples 1, 2 %i %i' % (m1, m2))
        chain.extend([m1, m2])
        if len(chain) < nsamples:
            print("Sending states back to workers ...")
            comm.send(m1, dest=source1, tag=tags.START)
            comm.send(m2, dest=source2, tag=tags.START)
        else:
            print('Requested number of samples reached!')
            break

    print("Master finishing, recorded chain:")
    print(chain)
    print("Closing ...")
    for i in range(1, size):
        print('sending signal to close to %i' % i)
        comm.send(None, dest=i, tag=tags.EXIT)

        print("Closed worker %i" % i)
        active_workers -= 1 
Example #26
Source File: batch.py    From nbodykit with GNU General Public License v3.0
def _distribute_tasks(self, tasks):
        """
        Internal function that distributes the tasks from the root to the workers
        """
        if not self.is_root():
            raise ValueError("only the root rank should distribute the tasks")

        ntasks = len(tasks)
        task_index     = 0
        closed_workers = 0

        # logging info
        args = (self.workers, ntasks)
        self.logger.debug("master starting with %d worker(s) with %d total tasks" %args)

        # loop until all workers have finished with no more tasks
        while closed_workers < self.workers:

            # look for tags from the workers
            data = self.basecomm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=self.status)
            source = self.status.Get_source()
            tag = self.status.Get_tag()

            # worker is ready, so send it a task
            if tag == self.tags.READY:

                # still more tasks to compute
                if task_index < ntasks:
                    this_task = [task_index, tasks[task_index]]
                    self.basecomm.send(this_task, dest=source, tag=self.tags.START)
                    self.logger.debug("sending task `%s` to worker %d" %(str(tasks[task_index]), source))
                    task_index += 1

                # all tasks sent -- tell worker to exit
                else:
                    self.basecomm.send(None, dest=source, tag=self.tags.EXIT)

            # store the results from finished tasks
            elif tag == self.tags.DONE:
                self.logger.debug("received result from worker %d" %source)

            # track workers that exited
            elif tag == self.tags.EXIT:
                closed_workers += 1
                self.logger.debug("worker %d has exited, closed workers = %d" %(source, closed_workers))