Python multiprocessing.cpu_count() Examples

The following are 26 code examples of multiprocessing.cpu_count(), drawn from open-source projects. The source file, project, and license are listed above each example so you can trace it back to its original context. You may also want to check out the other available functions and classes of the multiprocessing module.
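Before the examples, a quick refresher (a minimal sketch, not taken from any of the projects below): multiprocessing.cpu_count() returns the number of logical CPUs on the machine and raises NotImplementedError when that number cannot be determined. On Python 3.4+, os.cpu_count() is the equivalent that returns None instead of raising, and on Linux len(os.sched_getaffinity(0)) reports the CPUs actually available to the current process, which can be fewer (for example in containers).

import multiprocessing
import os

print(multiprocessing.cpu_count())    # logical CPUs on this machine

print(os.cpu_count())                 # same count, but None when unknown (3.4+)

if hasattr(os, "sched_getaffinity"):  # Linux: CPUs usable by *this* process
    print(len(os.sched_getaffinity(0)))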
Example #1
Source File: utils.py    From nmp_qc with MIT License
import multiprocessing

import numpy as np
from joblib import Parallel, delayed


def get_graph_stats(graph_obj_handle, prop='degrees'):
    # One contiguous slice of the dataset per core; get_values (defined
    # elsewhere in the same module) computes the statistics for a slice.
    num_cores = multiprocessing.cpu_count()
    inputs = [int(i * len(graph_obj_handle) / num_cores) for i in range(num_cores)] + [len(graph_obj_handle)]
    res = Parallel(n_jobs=num_cores)(delayed(get_values)(graph_obj_handle, inputs[i], inputs[i + 1], prop) for i in range(num_cores))

    stat_dict = {}

    if 'degrees' in prop:
        stat_dict['degrees'] = list(set([d for core_res in res for file_res in core_res for d in file_res['degrees']]))
    if 'edge_labels' in prop:
        stat_dict['edge_labels'] = list(set([d for core_res in res for file_res in core_res for d in file_res['edge_labels']]))
    if 'target_mean' in prop or 'target_std' in prop:
        param = np.array([file_res['params'] for core_res in res for file_res in core_res])
    if 'target_mean' in prop:
        stat_dict['target_mean'] = np.mean(param, axis=0)
    if 'target_std' in prop:
        stat_dict['target_std'] = np.std(param, axis=0)

    return stat_dict 
Example #2
Source File: text_reader.py    From LearnPaddle2 with Apache License 2.0
def train_reader(train_list_path):

    def reader():
        with open(train_list_path, 'r') as f:
            lines = f.readlines()
            # Shuffle the training data
            np.random.shuffle(lines)
            # Fetch each sample and its label
            for line in lines:
                data, label = line.split('\t')
                yield data, label

    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 1024)


# Preprocessing for the test data 
Example #3
Source File: scheduling.py    From me-ica with GNU Lesser General Public License v2.1
def cpu_count():
    """Return the number of CPU cores."""
    try:
        return multiprocessing.cpu_count()
    # TODO: remove except clause once we support only python >= 2.6
    except NameError:
        ## This code part is taken from parallel python.
        # Linux, Unix and MacOS
        if hasattr(os, "sysconf"):
            if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
                # Linux & Unix
                n_cpus = os.sysconf("SC_NPROCESSORS_ONLN")
                if isinstance(n_cpus, int) and n_cpus > 0:
                    return n_cpus
            else:
                # OSX
                return int(os.popen2("sysctl -n hw.ncpu")[1].read())
        # Windows
        if "NUMBER_OF_PROCESSORS" in os.environ:
            n_cpus = int(os.environ["NUMBER_OF_PROCESSORS"])
            if n_cpus > 0:
                return n_cpus
        # Default
        return 1 
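On Python 3.4 and later, the same fallback can be written in a couple of lines; a sketch (not part of me-ica), relying on the fact that os.cpu_count() returns None rather than raising when the count is unknown:

import os

def cpu_count():
    """Return the number of CPU cores, defaulting to 1 when unknown."""
    return os.cpu_count() or 1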
Example #4
Source File: run_atari.py    From lirpg with MIT License
def train(env_id, num_timesteps, seed, policy):

    ncpu = multiprocessing.cpu_count()
    if sys.platform == 'darwin':
        ncpu //= 2  # use only half the reported CPUs on macOS
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=ncpu,
                            inter_op_parallelism_threads=ncpu)
    config.gpu_options.allow_growth = True #pylint: disable=E1101
    tf.Session(config=config).__enter__()

    env = VecFrameStack(make_atari_env(env_id, 8, seed), 4)
    policy = {'cnn' : CnnPolicy, 'lstm' : LstmPolicy, 'lnlstm' : LnLstmPolicy}[policy]
    ppo2.learn(policy=policy, env=env, nsteps=128, nminibatches=4,
        lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
        ent_coef=.01,
        lr=lambda f : f * 2.5e-4,
        cliprange=lambda f : f * 0.1,
        total_timesteps=int(num_timesteps * 1.1)) 
Example #5
Source File: getmetrics_ec2monitoring.py    From InsightAgent with Apache License 2.0
def get_cpuusage(filename, field_values, which_dict):
    # Each line looks like "cpuN$counter=value"; collect the counters per core.
    with open(os.path.join(homepath, datadir, filename)) as cpuusage_file:
        lines = cpuusage_file.read().split("\n")
    cpu_count = multiprocessing.cpu_count()
    cpu_dict = {"cpu" + str(i): {} for i in range(cpu_count)}
    for eachline in lines:
        tokens_split = eachline.split("=")
        if len(tokens_split) == 1:
            continue
        cpucore, counter = tokens_split[0].split("$")
        cpu_dict[cpucore][counter] = float(tokens_split[1])
    which_dict["cpu_usage"] = cpu_dict
    field_values[0] = "CPU"
    totalresult = 0
    for i in range(cpu_count):
        core = cpu_dict["cpu" + str(i)]
        # Usage share = 1 - (idle + iowait) / sum of all jiffy counters.
        total = (core["user"] + core["nice"] + core["system"] + core["idle"]
                 + core["iowait"] + core["irq"] + core["softirq"])
        idle = core["idle"] + core["iowait"]
        totalresult += 1 - round(idle / total, 4)
    field_values.append(totalresult * 100) 
Example #6
Source File: config.py    From pywren-ibm-cloud with Apache License 2.0
def load_config(config_data):
    config_data['pywren']['runtime'] = RUNTIME_NAME_DEFAULT
    config_data['pywren']['runtime_memory'] = None
    if 'runtime_timeout' not in config_data['pywren']:
        config_data['pywren']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT

    if 'storage_backend' not in config_data['pywren']:
        config_data['pywren']['storage_backend'] = 'localhost'

    if 'localhost' not in config_data:
        config_data['localhost'] = {}

    if 'ibm_cos' in config_data and 'private_endpoint' in config_data['ibm_cos']:
        del config_data['ibm_cos']['private_endpoint']

    if 'workers' in config_data['pywren']:
        config_data['localhost']['workers'] = config_data['pywren']['workers']
    else:
        total_cores = multiprocessing.cpu_count()
        config_data['pywren']['workers'] = total_cores
        config_data['localhost']['workers'] = total_cores 
Example #7
Source File: get_recipes.py    From recipe-box with MIT License
def scrape_recipe_box(scraper, site_str, page_iter, status_interval=50):

    if args.append:
        recipes = quick_load(site_str)
    else:
        recipes = {}
    start = time.time()
    if args.multi:
        pool = Pool(cpu_count() * 2)
        results = pool.map(scraper, page_iter)
        for r in results:
            recipes.update(r)
    else:
        for i in page_iter:
            recipes.update(scraper(i))
            if i % status_interval == 0:
                print('Scraping page {} of {}'.format(i, max(page_iter)))
                quick_save(site_str, recipes)
            time.sleep(args.sleep)

    print('Scraped {} recipes from {} in {:.0f} minutes'.format(
        len(recipes), site_str, (time.time() - start) / 60))
    quick_save(site_str, recipes) 
Example #8
Source File: settings.py    From seizure-prediction with MIT License
def load_settings():
    with open('SETTINGS.json') as f:
        settings = json.load(f)

    data_dir = str(settings['competition-data-dir'])
    cache_dir = str(settings['data-cache-dir'])
    submission_dir = str(settings['submission-dir'])
    N_jobs = str(settings['num-jobs'])
    N_jobs = multiprocessing.cpu_count() if N_jobs == 'auto' else int(N_jobs)

    for d in (cache_dir, submission_dir):
        try:
            os.makedirs(d)
        except OSError:
            pass  # the directory already exists

    return Settings(data_dir=data_dir, cache_dir=cache_dir, submission_dir=submission_dir, N_jobs=N_jobs) 
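For reference, the loader above expects a SETTINGS.json shaped like the following (key names come from the code; the values are made up for illustration). Setting num-jobs to 'auto' selects multiprocessing.cpu_count():

{
    "competition-data-dir": "data",
    "data-cache-dir": "data-cache",
    "submission-dir": "submissions",
    "num-jobs": "auto"
}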
Example #9
Source File: utils.py    From PathCon with MIT License
def get_params_for_mp(n_triples):
    n_cores = mp.cpu_count()
    pool = mp.Pool(n_cores)
    avg = n_triples // n_cores

    range_list = []
    start = 0
    for i in range(n_cores):
        num = avg + 1 if i < n_triples - avg * n_cores else avg
        range_list.append([start, start + num])
        start += num

    return n_cores, pool, range_list


# input: [(h1, {t1, t2 ...}), (h2, {t3 ...}), ...]
# output: {(h1, t1): paths, (h1, t2): paths, (h2, t3): paths, ...} 
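As an illustration of the chunking in get_params_for_mp (not from the original source): with 10 triples and 3 cores, the remainder is handed to the first workers, so the slice sizes come out as 4, 3, 3.

n_triples, n_cores = 10, 3
avg = n_triples // n_cores             # 3
range_list, start = [], 0
for i in range(n_cores):
    # the first (n_triples - avg * n_cores) workers take one extra item
    num = avg + 1 if i < n_triples - avg * n_cores else avg
    range_list.append([start, start + num])
    start += num
print(range_list)                      # [[0, 4], [4, 7], [7, 10]]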
Example #10
Source File: reader.py    From LearnPaddle2 with Apache License 2.0
def train_reader(train_list_path, crop_size, resize_size):
    father_path = os.path.dirname(train_list_path)

    def reader():
        with open(train_list_path, 'r') as f:
            lines = f.readlines()
            # Shuffle the image list
            np.random.shuffle(lines)
            # Fetch each image and its label
            for line in lines:
                img, label = line.split('\t')
                img = os.path.join(father_path, img)
                yield img, label, crop_size, resize_size

    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 102400)


# Preprocessing for the test images 
Example #11
Source File: __main__.py    From jwalk with Apache License 2.0
def create_parser():
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--delimiter')
    parser.add_argument('--embedding-size', default=200, type=int)
    parser.add_argument('--graph-path')
    parser.add_argument('--has-header', action='store_true')
    parser.add_argument('--input', '-i', dest='infile', required=True)
    parser.add_argument('--log-level', '-l', type=str.upper, default='INFO')
    parser.add_argument('--num-walks', default=1, type=int)
    parser.add_argument('--model', '-m', dest='model_path')
    parser.add_argument('--output', '-o', dest='outfile', required=True)
    parser.add_argument('--stats', action='store_true')
    parser.add_argument('--undirected', action='store_true')
    parser.add_argument('--walk-length', default=10, type=int)
    parser.add_argument('--window-size', default=5, type=int)
    parser.add_argument('--workers', default=multiprocessing.cpu_count(),
                        type=int)
    return parser 
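A quick check of the defaults (illustrative; the flags and dest names come straight from the parser above):

parser = create_parser()
args = parser.parse_args(['--input', 'edges.tsv', '--output', 'graph.emb'])
print(args.workers)         # multiprocessing.cpu_count() on this machine
print(args.embedding_size)  # 200
print(args.walk_length)     # 10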
Example #12
Source File: skipgram.py    From link-prediction_with_deep-learning with MIT License
def __init__(self, vocabulary_counts=None, **kwargs):

        self.vocabulary_counts = None

        kwargs["min_count"] = kwargs.get("min_count", 1)
        kwargs["workers"] = kwargs.get("workers", cpu_count())
        kwargs["size"] = kwargs.get("size", 128)
        kwargs["sentences"] = kwargs.get("sentences", None)

        if vocabulary_counts is not None:
            self.vocabulary_counts = vocabulary_counts

        super(Skipgram, self).__init__(**kwargs) 
Example #13
Source File: text_reader.py    From LearnPaddle2 with Apache License 2.0
def test_reader(test_list_path):

    def reader():
        with open(test_list_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                data, label = line.split('\t')
                yield data, label

    return paddle.reader.xmap_readers(test_mapper, reader, cpu_count(), 1024) 
Example #14
Source File: reader.py    From LearnPaddle2 with Apache License 2.0
def test_reader(test_list_path, crop_size):
    father_path = os.path.dirname(test_list_path)

    def reader():
        with open(test_list_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                img, label = line.split('\t')
                img = os.path.join(father_path, img)
                yield img, label, crop_size

    return paddle.reader.xmap_readers(test_mapper, reader, cpu_count(), 1024) 
Example #15
Source File: graph.py    From link-prediction_with_deep-learning with MIT License
def load_adjacencylist(file_, undirected=False, chunksize=10000, unchecked=True):

  if unchecked:
    parse_func = parse_adjacencylist_unchecked
    convert_func = from_adjlist_unchecked
  else:
    parse_func = parse_adjacencylist
    convert_func = from_adjlist

  adjlist = []

  t0 = time()

  with open(file_) as f:
    with ProcessPoolExecutor(max_workers=cpu_count()) as executor:
      total = 0 
      for idx, adj_chunk in enumerate(executor.map(parse_func, grouper(int(chunksize), f))):
          adjlist.extend(adj_chunk)
          total += len(adj_chunk)
  
  t1 = time()

  logger.info('Parsed {} edges with {} chunks in {}s'.format(total, idx + 1, t1 - t0))

  t0 = time()
  G = convert_func(adjlist)
  t1 = time()

  logger.info('Converted edges to graph in {}s'.format(t1-t0))

  if undirected:
    t0 = time()
    G = G.make_undirected()
    t1 = time()
    logger.info('Made graph undirected in {}s'.format(t1-t0))

  return G 
Example #16
Source File: walks.py    From link-prediction_with_deep-learning with MIT License
def write_walks_to_disk(G, filebase, num_paths, path_length, alpha=0, rand=random.Random(0), num_workers=cpu_count(),
                        always_rebuild=True):
  global __current_graph
  global __vertex2str
  __current_graph = G
  __vertex2str = {v:str(v) for v in G.nodes()}
  files_list = ["{}.{}".format(filebase, str(x)) for x in xrange(num_paths)]
  expected_size = len(G)
  args_list = []
  files = []

  if num_paths <= num_workers:
    paths_per_worker = [1 for x in range(num_paths)]
  else:
    paths_per_worker = [len(filter(lambda z: z is not None, [y for y in x]))
                        for x in graph.grouper(int(num_paths / num_workers) + 1, range(1, num_paths + 1))]

  with ProcessPoolExecutor(max_workers=num_workers) as executor:
    for size, file_, ppw in zip(executor.map(count_lines, files_list), files_list, paths_per_worker):
      if always_rebuild or size != (ppw*expected_size):
        args_list.append((ppw, path_length, alpha, random.Random(rand.randint(0, 2**31)), file_))
      else:
        files.append(file_)

  with ProcessPoolExecutor(max_workers=num_workers) as executor:
    for file_ in executor.map(_write_walks_to_disk, args_list):
      files.append(file_)

  return files 
Example #17
Source File: process.py    From plugin.video.kmediatorrent with GNU General Public License v3.0
def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count()
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        self._result_queue = multiprocessing.Queue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        self._processes = set()

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._queue_count = 0
        self._pending_work_items = {} 
Example #18
Source File: process.py    From tornado-zh with MIT License
def cpu_count():
    """Returns the number of processors on this machine."""
    if multiprocessing is None:
        return 1
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        pass
    try:
        return os.sysconf("SC_NPROCESSORS_CONF")
    except ValueError:
        pass
    gen_log.error("Could not detect number of processors; assuming 1")
    return 1 
Example #19
Source File: funcgroup_distance_to_id.py    From screenlamp with Apache License 2.0
def get_num_cpus(n_cpus):
    # A falsy value (0 or None) selects every core; a negative value is
    # subtracted from the core count, which, since n_cpus is negative here,
    # yields cpu_count() + abs(n_cpus).
    if not n_cpus:
        n_cpus = cpu_count()
    elif n_cpus < 0:
        n_cpus = cpu_count() - n_cpus
    return n_cpus 
Example #20
Source File: base.py    From mdentropy with MIT License
def __init__(self, n_bins=3, rng=None, method='knn',
                 threads=None):
        self.data = None
        self.shuffled_data = None
        self.labels = None
        self.n_bins = n_bins
        self.rng = rng
        self.method = method
        self.n_threads = threads or int(cpu_count() / 2) 
Example #21
Source File: process.py    From misp42splunk with GNU Lesser General Public License v3.0
def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count()
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        self._result_queue = multiprocessing.Queue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        self._processes = set()

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._queue_count = 0
        self._pending_work_items = {} 
Example #22
Source File: thread.py    From misp42splunk with GNU Lesser General Public License v3.0
def cpu_count():
    # Stub fallback: returns None so the caller can substitute its own
    # default when the real multiprocessing.cpu_count is unavailable.
    return None 
Example #23
Source File: process_pool.py    From misp42splunk with GNU Lesser General Public License v3.0
def __init__(self, size=0, maxtasksperchild=10000):
        if size <= 0:
            size = multiprocessing.cpu_count()
        self.size = size
        self._pool = multiprocessing.Pool(processes=size,
                                          maxtasksperchild=maxtasksperchild)
        self._stopped = False 
Example #24
Source File: thread_pool.py    From misp42splunk with GNU Lesser General Public License v3.0
def __init__(self, min_size=1, max_size=128,
                 task_queue_size=1024, daemon=True):
        assert task_queue_size

        if not min_size or min_size <= 0:
            min_size = multiprocessing.cpu_count()

        if not max_size or max_size <= 0:
            max_size = multiprocessing.cpu_count() * 8

        self._min_size = min_size
        self._max_size = max_size
        self._daemon = daemon

        self._work_queue = queue.Queue(task_queue_size)
        self._thrs = []
        for _ in range(min_size):
            thr = threading.Thread(target=self._run)
            self._thrs.append(thr)
        self._admin_queue = queue.Queue()
        self._admin_thr = threading.Thread(target=self._do_admin)
        self._last_resize_time = time()
        self._last_size = min_size
        self._lock = threading.Lock()
        self._occupied_threads = 0
        self._count_lock = threading.Lock()
        self._started = False 
Example #25
Source File: auto.py    From dcc with Apache License 2.0
def __init__(self, settings):
        if not "my" in settings:
            raise ValueError("'my' object not found in settings!")

        if not "log" in settings:
            raise ValueError("'log' object not found in settings!")

        if not "max_fetcher" in settings:
            settings["max_fetcher"] = multiprocessing.cpu_count()
            l.warning("No maximum number of threads found, setting MAX_CPU: {}".format(settings["max_fetcher"]))

        self.settings = settings 
Example #26
Source File: hostinfo.py    From zun with Apache License 2.0
def get_total_vcpus():
    return multiprocessing.cpu_count()