Python mpi4py.MPI.Get_processor_name() Examples

The following are 9 code examples of mpi4py.MPI.Get_processor_name(). The original project and source file are noted above each example. You may also want to check out all available functions and classes of the module mpi4py.MPI.
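As a quick orientation before the examples: MPI.Get_processor_name() is a module-level function that returns the name of the host the calling process runs on (usually the node's hostname). A minimal sketch, run under, e.g., mpiexec -n 4 python hello.py:

from mpi4py import MPI

comm = MPI.COMM_WORLD
# Each rank reports which physical node it landed on.
print("rank %d of %d runs on %s"
      % (comm.Get_rank(), comm.Get_size(), MPI.Get_processor_name()))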
Example #1
Source File: mpi.py    From westpa with MIT License
def __init__(self):
    # Initialize communicator and obtain standard MPI variables
    comm = MPI.COMM_WORLD

    self.comm = comm
    self.rank = comm.Get_rank()
    self.num_procs = comm.Get_size()
    self.name = MPI.Get_processor_name()

    # Define master rank
    self.master_rank = 0

    # Define message tags for task, result, and announce
    self.task_tag = 10
    self.result_tag = 20
    self.announce_tag = 30

    # Create an empty message buffer
    messages = []
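The tags and master rank above imply a master/worker layout. A hedged, self-contained sketch of the rank-based branching this sets up (the print bodies stand in for real dispatch logic):

from mpi4py import MPI

comm = MPI.COMM_WORLD
MASTER_RANK = 0  # mirrors self.master_rank above
if comm.Get_rank() == MASTER_RANK:
    # master: would dispatch tasks using the tags defined above (10/20/30)
    print("master on %s" % MPI.Get_processor_name())
else:
    # worker: would wait for task_tag messages and reply with result_tag
    print("worker %d on %s" % (comm.Get_rank(), MPI.Get_processor_name()))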
Example #2
Source File: pt_toy_example.py    From beat with GNU General Public License v3.0
def worker_process(comm, rank, tags, status):
    # Worker processes execute code below
    name = MPI.Get_processor_name()
    print("I am a worker with rank %d on %s." % (rank, name))
    comm.send(None, dest=0, tag=tags.READY)

    while True:
        print('Receiving ...')
        task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        print('received!')

        tag = status.Get_tag()

        if tag == tags.START:
            # Do the work here
            result = task + 1
            print('attempting to send ...')
            comm.send(result, dest=0, tag=tags.DONE)
            print('sending worked ...')
        elif tag == tags.EXIT:
            print('went through exit')
            break 
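The worker above assumes a tags namespace with READY, START, DONE, and EXIT members, plus a matching master loop on rank 0. One common way to define such tags (a sketch; not necessarily how beat defines them):

from enum import IntEnum

class Tags(IntEnum):
    READY = 0   # worker is idle and asking for work
    START = 1   # message payload is a task to run
    DONE = 2    # message payload is a finished result
    EXIT = 3    # no more work; worker should shut down

Because IntEnum members behave as plain ints, they can be passed directly as the tag argument of comm.send and compared against status.Get_tag().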
Example #3
Source File: parallel.py    From PyDeepGP with BSD 3-Clause "New" or "Revised" License
def get_id_within_node(comm=None):
    from mpi4py import MPI
    if comm is None: comm = MPI.COMM_WORLD
    rank = comm.rank
    nodename = MPI.Get_processor_name()
    nodelist = comm.allgather(nodename)
    return len([i for i in nodelist[:rank] if i == nodename])
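A typical use of get_id_within_node() is to bind each rank to a node-local resource such as a GPU. A hedged sketch (this particular binding is an assumption, and it presumes no more ranks per node than GPUs):

import os

local_id = get_id_within_node()
# Restrict CUDA to the GPU matching this rank's position on its node.
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_id)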
Example #4
Source File: space.py    From maxwell-b with GNU General Public License v3.0
# Imports needed to run this snippet on its own (in the original module,
# `driver` is pycuda.driver).
from mpi4py import MPI
import pycuda.driver as driver


def _init_gpu(comm):
    """ Chooses a gpu and creates a context on it. """
    # Find out how many GPUs are available to us on this node.
    driver.init()
    num_gpus = driver.Device.count()

    # Figure out the names of the other hosts.
    rank = comm.Get_rank()  # Find out which process I am.
    name = MPI.Get_processor_name()  # The name of my node.
    hosts = comm.allgather(name)  # Get the names of all the other hosts.

    # Find out which GPU to take (by precedence).
    gpu_id = hosts[0:rank].count(name)
    if gpu_id >= num_gpus:
        raise TypeError('No GPU available.')

    # Create a context on the appropriate device, falling back to the next
    # device (modulo the device count) if context creation fails.
    for k in range(num_gpus):
        try:
            device = driver.Device((gpu_id + k) % num_gpus)
            context = device.make_context()
        except Exception:
            continue
        else:
            # print("On %s: process %d taking gpu %d of %d."
            #       % (name, rank, gpu_id + k, num_gpus))
            break

    return device, context  # Return device and context.
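The precedence rule above deserves a second look: each rank counts how many lower-numbered ranks share its hostname, and that count becomes its GPU index. A self-contained illustration with a hypothetical allgather result:

# Hypothetical result of comm.allgather(name) on a 4-process job.
hosts = ['nodeA', 'nodeA', 'nodeB', 'nodeA']
rank = 3
gpu_id = hosts[0:rank].count(hosts[rank])  # same-host ranks before me
assert gpu_id == 2  # rank 3 is the third process on nodeA -> GPU 2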
Example #5
Source File: mpi.py    From utilities with MIT License
def get_host():
    """Get the hostname that this task is running on"""
    return MPI.Get_processor_name() 
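Building on get_host(), a hedged sketch that gathers the hostnames of all ranks and groups ranks by node (useful for host-aware scheduling):

from collections import defaultdict
from mpi4py import MPI

comm = MPI.COMM_WORLD
by_host = defaultdict(list)
for r, h in enumerate(comm.allgather(MPI.Get_processor_name())):
    by_host[h].append(r)
if comm.Get_rank() == 0:
    print(dict(by_host))  # e.g. {'nodeA': [0, 1], 'nodeB': [2, 3]}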
Example #6
Source File: batch.py    From nbodykit with GNU General Public License v3.0
def _get_tasks(self):
    """
    Internal generator that yields the next available task from a worker
    """
    if self.is_root():
        raise RuntimeError("Root rank mistakenly told to await tasks")

    # logging info
    if self.comm.rank == 0:
        args = (self.rank, MPI.Get_processor_name(), self.comm.size)
        self.logger.debug("worker master rank is %d on %s with %d processes available" % args)

    # continuously loop and wait for instructions
    while True:
        args = None
        tag = -1

        # have the master rank of the subcomm ask for a task and then broadcast
        if self.comm.rank == 0:
            self.basecomm.send(None, dest=0, tag=self.tags.READY)
            args = self.basecomm.recv(source=0, tag=MPI.ANY_TAG, status=self.status)
            tag = self.status.Get_tag()

        # bcast to everyone in the worker subcomm
        args = self.comm.bcast(args)  # args is [task_number, task_value]
        tag = self.comm.bcast(tag)

        # yield the task
        if tag == self.tags.START:

            # yield the task value
            yield args

            # wait for everyone in task group before telling master this task is done
            self.comm.Barrier()
            if self.comm.rank == 0:
                self.basecomm.send([args[0], None], dest=0, tag=self.tags.DONE)

        # see ya later
        elif tag == self.tags.EXIT:
            break

    # wait for everyone in task group and exit
    self.comm.Barrier()
    if self.comm.rank == 0:
        self.basecomm.send(None, dest=0, tag=self.tags.EXIT)

    # debug logging
    self.logger.debug("rank %d process is done waiting" % self.rank)
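For context, the master side of this READY/START/DONE/EXIT protocol is not shown; a minimal hedged sketch of what it might look like (function and variable names are assumptions, not nbodykit's code):

from mpi4py import MPI

def master_dispatch(basecomm, tags, status, tasks):
    """Hand each task to whichever worker reports READY; results arriving
    with tag DONE are ignored in this sketch."""
    nworkers = basecomm.Get_size() - 1
    it = iter(enumerate(tasks))  # yields [task_number, task_value] pairs
    closed = 0
    while closed < nworkers:
        basecomm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source, tag = status.Get_source(), status.Get_tag()
        if tag == tags.READY:
            try:
                basecomm.send(next(it), dest=source, tag=tags.START)
            except StopIteration:
                basecomm.send(None, dest=source, tag=tags.EXIT)
        elif tag == tags.EXIT:
            closed += 1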
Example #7
Source File: pt.py    From beat with GNU General Public License v3.0
def worker_process(comm, tags, status):
    """
    Worker processes that do the actual sampling.
    They receive all arguments from the master process.

    Parameters
    ----------
    comm : mpi.communicator
    tags : message tags
    status : mpi.status object
    """
    name = MPI.Get_processor_name()
    logger.debug(
        "Entering worker process with rank %d on %s." % (comm.rank, name))
    comm.send(None, dest=0, tag=tags.READY)

    logger.debug('Worker %i receiving work package ...' % comm.rank)
    kwargs = comm.recv(source=0, tag=tags.INIT, status=status)
    logger.debug('Worker %i received package!' % comm.rank)

    try:
        step = kwargs['step']
    except KeyError:
        raise ValueError('Step method not defined!')

    # do initial sampling
    result = sample_pt_chain(**kwargs)
    comm.Send([result, MPI.DOUBLE], dest=0, tag=tags.DONE)

    # enter repeated sampling
    while True:
        # TODO: make transd-compatible
        data = num.empty(step.lordering.size, dtype=tconfig.floatX)
        comm.Recv([data, MPI.DOUBLE],
                  tag=MPI.ANY_TAG, source=0, status=status)

        tag = status.Get_tag()
        if tag == tags.SAMPLE:
            lpoint = step.lij.a2l(data)
            start = step.lij.l2d(lpoint)
            kwargs['start'] = start
            # overwrite previous point in case got swapped
            kwargs['step'].chain_previous_lpoint[comm.rank] = lpoint
            result = sample_pt_chain(**kwargs)

            logger.debug('Worker %i attempting to send ...' % comm.rank)
            comm.Send([result, MPI.DOUBLE], dest=0, tag=tags.DONE)
            logger.debug('Worker %i sent message successfully ...' % comm.rank)

        elif tag == tags.BETA:
            logger.debug(
                'Worker %i received beta: %f' % (comm.rank, data[0]))
            kwargs['step'].beta = data[0]

        elif tag == tags.EXIT:
            logger.debug('Worker %i went through EXIT!' % comm.rank)
            break 
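Note the two messaging styles mixed above: lowercase comm.send/comm.recv pickle arbitrary Python objects, while uppercase comm.Send/comm.Recv move raw memory buffers (much faster for numeric arrays). A minimal buffer round-trip, assuming at least two ranks:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
buf = np.zeros(4, dtype='d')
if comm.rank == 0:
    buf[:] = [1.0, 2.0, 3.0, 4.0]
    comm.Send([buf, MPI.DOUBLE], dest=1, tag=0)
elif comm.rank == 1:
    comm.Recv([buf, MPI.DOUBLE], source=0, tag=0)
    print(buf)  # [1. 2. 3. 4.]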
Example #8
Source File: manager.py    From mpi_learn with GNU General Public License v3.0
def get_device(comm, num_masters=1, gpu_limit=-1, gpu_for_master=False):
    """Arguments:
        comm: MPI intracommunicator containing all processes
        num_masters: number of processes that will be assigned as masters
        gpu_limit: maximum number of gpus to use on one host
        gpu_for_master: whether master processes should be given a gpu
       Returns device name 'cpu' or 'gpuN' appropriate for use with theano"""
    def get_gpu_list(mem_lim=2000):
        import gpustat
        stats = gpustat.GPUStatCollection.new_query()
        ids = list(map(lambda gpu: int(gpu.entry['index']), stats))
        free = list(map(lambda gpu: float(gpu.entry['memory.total']) - float(gpu.entry['memory.used']), stats))
        unused_gpu = list(filter(lambda x: x[1] > mem_lim, zip(ids, free)))
        return [x[0] for x in unused_gpu]

    # Get the ranks of the other processes that share the same host
    # and determine which GPU to take on the host
    if gpu_limit == 0:
        logging.info("requested not to use any gpu")
        dev = 'cpu'
        return dev

    rank = comm.Get_rank()
    host = MPI.Get_processor_name()
    hosts = comm.allgather(host)
    workers_sharing_host = [i for i in range(comm.Get_size()) if hosts[i] == host]
    if rank in workers_sharing_host:
        worker_id = workers_sharing_host.index(rank)
    else:
        worker_id = -1

    for inode in range(comm.Get_size()):
        if rank == inode:
            gpu_list = get_gpu_list()
            if gpu_limit >= 0:
                gpu_list = gpu_list[:gpu_limit]  # limit the number of gpus
            if len(gpu_list) == 0:
                logging.info("No free GPU available. Using CPU instead.")
                dev = 'cpu'
            elif worker_id < 0:
                # alone on this machine, so take the last gpu
                logging.info("Alone on the node and taking the last gpu")
                dev = 'gpu%d' % (gpu_list[-1])
            else:
                logging.debug("Sharing a node and taking a gpu")
                dev = 'gpu%d' % (gpu_list[worker_id % len(gpu_list)])
            logging.debug("rank %d can have %s", rank, dev)
        comm.Barrier()
    return dev
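A hedged sketch of calling get_device() before the theano import (the THEANO_FLAGS device string is standard theano usage of that era; exact flags may vary by version):

import os
from mpi4py import MPI

dev = get_device(MPI.COMM_WORLD, gpu_limit=1)
os.environ['THEANO_FLAGS'] = 'device=%s' % dev
import theano  # must be imported after the device has been chosen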
Example #9
Source File: imagenet.py    From Theano-MPI with Educational Community License v2.0
def spawn_load(self):
    '''Spawn a parallel loading process using MPI.'''

    if not para_load:
        return

    from mpi4py import MPI
    import os
    import sys

    hostname = MPI.Get_processor_name()
    mpiinfo = MPI.Info.Create()

    # Using key=host would give an "all nodes filled" issue, because it
    # needs an additional slot; the hostname should also exactly match the
    # output list of --display-allocation.
    if hostname != hostname.split('.')[0]:
        hostname = hostname.split('.')[0]
    mpiinfo.Set(key='add-host', value=hostname)

    num_spawn = 1

    if "CPULIST_train" in os.environ:
        # see https://gist.github.com/lebedov/eadce02a320d10f0e81c
        envstr = ""
        envstr += 'CPULIST_train=%s\n' % os.environ['CPULIST_train']
        mpiinfo.Set(key='env', value=envstr)

    mpicommand = sys.executable

    # get the dir of imagenet.py
    file_dir = os.path.dirname(os.path.realpath(__file__))

    self.icomm = MPI.COMM_SELF.Spawn(mpicommand,
                                     args=[file_dir + '/proc_load_mpi.py'],
                                     info=mpiinfo, maxprocs=num_spawn)
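On the other side of the Spawn call, the child script (proc_load_mpi.py above) would typically retrieve the intercommunicator back to its parent; a minimal hedged sketch of that side:

from mpi4py import MPI

# Get_parent() returns the intercommunicator connecting this spawned
# process to the process that called Spawn().
icomm = MPI.Comm.Get_parent()
rank = icomm.Get_rank()
# ... receive loading instructions from the parent over icomm ...
icomm.Disconnect()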