Python resource.RLIMIT_NOFILE Examples
The following are 23 code examples of resource.RLIMIT_NOFILE, the constant that identifies a process's limit on open file descriptors. The examples are drawn from open-source projects; the original project and source file are noted above each example. You may also want to check out the other available functions and constants of the resource module.
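All of the examples below build on the same two calls, so here is a minimal, self-contained sketch of the pattern first: read the current (soft, hard) pair with resource.getrlimit, then raise the soft limit toward the hard limit with resource.setrlimit. The target of 4096 is an arbitrary illustration, not a value taken from any of the projects below.

import resource

# Read the current soft/hard limits on open file descriptors.
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

# An unprivileged process may raise its soft limit up to (but not past)
# the hard limit. 4096 is only an illustrative target.
target = 4096 if hard == resource.RLIM_INFINITY else min(4096, hard)
if soft < target:
    resource.setrlimit(resource.RLIMIT_NOFILE, (target, hard))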
Example #1
Source File: __main__.py From aws_list_all with MIT License
def increase_limit_nofiles():
    soft_limit, hard_limit = getrlimit(RLIMIT_NOFILE)
    desired_limit = 6000  # This should be comfortably larger than the product of services and regions
    if hard_limit < desired_limit:
        print("-" * 80, file=stderr)
        print(
            "WARNING!\n"
            "Your system limits the number of open files and network connections to {}.\n"
            "This may lead to failures during querying.\n"
            "Please increase the hard limit of open files to at least {}.\n"
            "The configuration for hard limits is often found in /etc/security/limits.conf".format(
                hard_limit, desired_limit
            ),
            file=stderr
        )
        print("-" * 80, file=stderr)
        print(file=stderr)
    target_soft_limit = min(desired_limit, hard_limit)
    if target_soft_limit > soft_limit:
        print("Increasing the open connection limit \"nofile\" from {} to {}.".format(
            soft_limit, target_soft_limit))
        setrlimit(RLIMIT_NOFILE, (target_soft_limit, hard_limit))
    print("")
Example #2
Source File: util.py From BugZoo with MIT License
def report_resource_limits(logger: logging.Logger) -> None:
    resources = [
        ('CPU time (seconds)', resource.RLIMIT_CPU),
        ('Heap size (bytes)', resource.RLIMIT_DATA),
        ('Num. process', resource.RLIMIT_NPROC),
        ('Num. files', resource.RLIMIT_NOFILE),
        ('Address space', resource.RLIMIT_AS),
        ('Locked address space', resource.RLIMIT_MEMLOCK)
    ]
    resource_limits = [
        (name, resource.getrlimit(res)) for (name, res) in resources
    ]
    resource_s = '\n'.join([
        '* {}: {}'.format(res, lim) for (res, lim) in resource_limits
    ])
    logger.info("resource limits:\n%s", indent(resource_s, 2))
Example #3
Source File: process.py From mitogen with BSD 3-Clause "New" or "Revised" License
def increase_open_file_limit():
    """
    #549: in order to reduce the possibility of hitting an open files limit,
    increase :data:`resource.RLIMIT_NOFILE` from its soft limit to its hard
    limit, if they differ. It is common that a low soft limit is configured
    by default, where the hard limit is much higher.
    """
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard == resource.RLIM_INFINITY:
        hard_s = '(infinity)'
        # cap in case of O(RLIMIT_NOFILE) algorithm in some subprocess.
        hard = 524288
    else:
        hard_s = str(hard)

    LOG.debug('inherited open file limits: soft=%d hard=%s', soft, hard_s)
    if soft >= hard:
        LOG.debug('max open files already set to hard limit: %d', hard)
        return

    # OS X is limited by kern.maxfilesperproc sysctl, rather than the
    # advertised unlimited hard RLIMIT_NOFILE. Just hard-wire known defaults
    # for that sysctl, to avoid the mess of querying it.
    for value in (hard, 10240):
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (value, hard))
            LOG.debug('raised soft open file limit from %d to %d', soft, value)
            break
        except ValueError as e:
            LOG.debug('could not raise soft open file limit from %d to %d: %s',
                      soft, value, e)
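The comment in this example notes that macOS caps per-process open files with the kern.maxfilesperproc sysctl, and hard-wires a known default (10240) to avoid querying it. If you did want to query it instead, here is a sketch that assumes the sysctl command-line tool is available on the host:

import subprocess

def macos_maxfilesperproc(default=10240):
    # Ask the sysctl CLI for kern.maxfilesperproc; fall back to the
    # commonly seen default if the query fails (e.g. not on macOS).
    try:
        out = subprocess.run(
            ["sysctl", "-n", "kern.maxfilesperproc"],
            capture_output=True, text=True, check=True,
        )
        return int(out.stdout.strip())
    except (OSError, subprocess.CalledProcessError, ValueError):
        return default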
Example #4
Source File: test_fd.py From landscape-client with GNU General Public License v2.0
def test_ignore_OSErrors(self):
    """
    If os.close raises an OSError, it is ignored and we continue to close
    the rest of the FDs.
    """
    closed_fds = []

    def remember_and_throw(fd):
        closed_fds.append(fd)
        raise OSError("Bad FD!")

    with patch("os.close", side_effect=remember_and_throw) as close_mock:
        with self.mock_getrlimit(10) as getrlimit_mock:
            clean_fds()
    getrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE)

    expected_fds = list(range(3, 10))
    calls = [call(i) for i in expected_fds]
    close_mock.assert_has_calls(calls, any_order=True)
    self.assertEqual(closed_fds, expected_fds)
Example #5
Source File: test_fd.py From landscape-client with GNU General Public License v2.0
def test_clean_fds_sanity(self):
    """
    If the process limit for file descriptors is very high (> 4096), then
    we only close 4096 file descriptors.
    """
    closed_fds = []
    with patch("os.close", side_effect=closed_fds.append) as close_mock:
        with self.mock_getrlimit(4100) as getrlimit_mock:
            clean_fds()
    getrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE)

    expected_fds = list(range(3, 4096))
    calls = [call(i) for i in expected_fds]
    close_mock.assert_has_calls(calls, any_order=True)
    self.assertEqual(closed_fds, expected_fds)
Example #6
Source File: test_selectors.py From Fluid-Designer with GNU General Public License v3.0
def test_above_fd_setsize(self):
    # A scalable implementation should have no problem with more than
    # FD_SETSIZE file descriptors. Since we don't know the value, we just
    # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                        (soft, hard))
        NUM_FDS = min(hard, 2**16)
    except (OSError, ValueError):
        NUM_FDS = soft

    # guard for already allocated FDs (stdin, stdout...)
    NUM_FDS -= 32
    s = self.SELECTOR()
    self.addCleanup(s.close)
    for i in range(NUM_FDS // 2):
        try:
            rd, wr = self.make_socketpair()
        except OSError:
            # too many FDs, skip - note that we should only catch EMFILE
            # here, but apparently *BSD and Solaris can fail upon connect()
            # or bind() with EADDRNOTAVAIL, so let's be safe
            self.skipTest("FD limit reached")
        try:
            s.register(rd, selectors.EVENT_READ)
            s.register(wr, selectors.EVENT_WRITE)
        except OSError as e:
            if e.errno == errno.ENOSPC:
                # this can be raised by epoll if we go over
                # fs.epoll.max_user_watches sysctl
                self.skipTest("FD limit reached")
            raise

    self.assertEqual(NUM_FDS // 2, len(s.select()))
Example #7
Source File: main.py From edgedb with Apache License 2.0
def bump_rlimit_nofile() -> None:
    try:
        fno_soft, fno_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    except resource.error:
        logger.warning('could not read RLIMIT_NOFILE')
    else:
        if fno_soft < defines.EDGEDB_MIN_RLIMIT_NOFILE:
            try:
                resource.setrlimit(
                    resource.RLIMIT_NOFILE,
                    (min(defines.EDGEDB_MIN_RLIMIT_NOFILE, fno_hard),
                     fno_hard))
            except resource.error:
                logger.warning('could not set RLIMIT_NOFILE')
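Two details of the Python 3 resource API are worth noting here: resource.error is a deprecated alias of OSError, and a request to set the soft limit above the hard limit is reported as ValueError rather than OSError, which is why the code clamps the requested value with min(..., fno_hard). A small demonstration of the latter:

import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if hard != resource.RLIM_INFINITY:
    try:
        # A soft limit above the hard limit is rejected with ValueError.
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard + 1, hard))
    except ValueError as exc:
        print("rejected:", exc)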
Example #8
Source File: server.py From qinling with Apache License 2.0
def _set_ulimit():
    """Limit resources usage for the current process and/or its children.

    Refer to https://docs.python.org/2.7/library/resource.html
    """
    customized_limits = {
        resource.RLIMIT_NOFILE: 1024,
        resource.RLIMIT_NPROC: 128,
        # TODO(lxkong): 50M by default, need to be configurable in future.
        resource.RLIMIT_FSIZE: 524288000
    }
    for t, soft in list(customized_limits.items()):
        _, hard = resource.getrlimit(t)
        resource.setrlimit(t, (soft, hard))
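Note that this helper only replaces the soft limits and keeps each existing hard limit: lowering a soft limit needs no privileges, while pushing one above the current hard limit would fail with ValueError, as shown in the demonstration after Example #7.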
Example #9
Source File: server.py From qinling with Apache License 2.0
def _set_ulimit():
    """Limit resources usage for the current process and/or its children.

    Refer to https://docs.python.org/2.7/library/resource.html
    """
    customized_limits = {
        resource.RLIMIT_NOFILE: 1024,
        resource.RLIMIT_NPROC: 128,
        # TODO(lxkong): 50M by default, need to be configurable in future.
        resource.RLIMIT_FSIZE: 524288000
    }
    for t, soft in customized_limits.items():
        _, hard = resource.getrlimit(t)
        resource.setrlimit(t, (soft, hard))
Example #10
Source File: mounter.py From gitfs with Apache License 2.0
def start_fuse():
    parser = argparse.ArgumentParser(prog="GitFS")
    args = parse_args(parser)

    try:
        merge_worker, fetch_worker, router = prepare_components(args)
    except:
        return

    if args.max_open_files != -1:
        resource.setrlimit(
            resource.RLIMIT_NOFILE, (args.max_open_files, args.max_open_files)
        )

    # ready to mount it
    if sys.platform == "darwin":
        FUSE(
            router,
            args.mount_point,
            foreground=args.foreground,
            allow_root=args.allow_root,
            allow_other=args.allow_other,
            fsname=args.remote_url,
            subtype="gitfs",
        )
    else:
        FUSE(
            router,
            args.mount_point,
            foreground=args.foreground,
            nonempty=True,
            allow_root=args.allow_root,
            allow_other=args.allow_other,
            fsname=args.remote_url,
            subtype="gitfs",
        )
Example #11
Source File: test_processutils.py From oslo.concurrency with Apache License 2.0
def test_number_files(self):
    nfiles = self.soft_limit(resource.RLIMIT_NOFILE, 1, 1024)
    prlimit = processutils.ProcessLimits(number_files=nfiles)
    self.check_limit(prlimit, 'RLIMIT_NOFILE', nfiles)
Example #12
Source File: test_selectors.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def test_above_fd_setsize(self):
    # A scalable implementation should have no problem with more than
    # FD_SETSIZE file descriptors. Since we don't know the value, we just
    # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                        (soft, hard))
        NUM_FDS = min(hard, 2**16)
    except (OSError, ValueError):
        NUM_FDS = soft

    # guard for already allocated FDs (stdin, stdout...)
    NUM_FDS -= 32
    s = self.SELECTOR()
    self.addCleanup(s.close)
    for i in range(NUM_FDS // 2):
        try:
            rd, wr = self.make_socketpair()
        except OSError:
            # too many FDs, skip - note that we should only catch EMFILE
            # here, but apparently *BSD and Solaris can fail upon connect()
            # or bind() with EADDRNOTAVAIL, so let's be safe
            self.skipTest("FD limit reached")
        try:
            s.register(rd, selectors.EVENT_READ)
            s.register(wr, selectors.EVENT_WRITE)
        except OSError as e:
            if e.errno == errno.ENOSPC:
                # this can be raised by epoll if we go over
                # fs.epoll.max_user_watches sysctl
                self.skipTest("FD limit reached")
            raise

    self.assertEqual(NUM_FDS // 2, len(s.select()))
Example #13
Source File: test_selectors.py From annotated-py-projects with MIT License
def test_above_fd_setsize(self):
    # A scalable implementation should have no problem with more than
    # FD_SETSIZE file descriptors. Since we don't know the value, we just
    # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                        (soft, hard))
        NUM_FDS = min(hard, 2**16)
    except (OSError, ValueError):
        NUM_FDS = soft

    # guard for already allocated FDs (stdin, stdout...)
    NUM_FDS -= 32
    s = self.SELECTOR()
    self.addCleanup(s.close)
    for i in range(NUM_FDS // 2):
        try:
            rd, wr = self.make_socketpair()
        except OSError:
            # too many FDs, skip - note that we should only catch EMFILE
            # here, but apparently *BSD and Solaris can fail upon connect()
            # or bind() with EADDRNOTAVAIL, so let's be safe
            self.skipTest("FD limit reached")
        try:
            s.register(rd, selectors.EVENT_READ)
            s.register(wr, selectors.EVENT_WRITE)
        except OSError as e:
            if e.errno == errno.ENOSPC:
                # this can be raised by epoll if we go over
                # fs.epoll.max_user_watches sysctl
                self.skipTest("FD limit reached")
            raise

    self.assertEqual(NUM_FDS // 2, len(s.select()))
Example #14
Source File: launcher.py From opsbro with MIT License
def __find_and_set_higer_system_limits(self):
    resource = get_resource_lib()
    if not resource:
        logger.info('System resource package is not available, cannot increase system limits')
        return
    for (res, res_name) in [(resource.RLIMIT_NPROC, 'number of process/threads'),
                            (resource.RLIMIT_NOFILE, 'number of open files')]:
        self.__find_and_set_higer_system_limit(res, res_name)
Example #15
Source File: dispatch.py From accelerator with Apache License 2.0
def update_valid_fds():
    # Collect all valid fds, so we can close them in job processes
    global valid_fds
    valid_fds = []
    from fcntl import fcntl, F_GETFD
    from resource import getrlimit, RLIMIT_NOFILE
    for fd in range(3, getrlimit(RLIMIT_NOFILE)[0]):
        try:
            fcntl(fd, F_GETFD)
            valid_fds.append(fd)
        except Exception:
            pass
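This example discovers open descriptors by probing every fd up to the soft limit with fcntl. On Linux specifically, the same information can be read from /proc/self/fd without probing. A minimal Linux-only sketch (note that the descriptor listdir itself opens to scan the directory also appears, so one transient fd may show up in the result):

import os

def list_open_fds():
    # Linux-only: /proc/self/fd contains one entry per descriptor the
    # calling process currently has open.
    return sorted(int(name) for name in os.listdir("/proc/self/fd"))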
Example #16
Source File: test_selectors.py From ironpython3 with Apache License 2.0
def test_above_fd_setsize(self):
    # A scalable implementation should have no problem with more than
    # FD_SETSIZE file descriptors. Since we don't know the value, we just
    # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                        (soft, hard))
        NUM_FDS = min(hard, 2**16)
    except (OSError, ValueError):
        NUM_FDS = soft

    # guard for already allocated FDs (stdin, stdout...)
    NUM_FDS -= 32
    s = self.SELECTOR()
    self.addCleanup(s.close)
    for i in range(NUM_FDS // 2):
        try:
            rd, wr = self.make_socketpair()
        except OSError:
            # too many FDs, skip - note that we should only catch EMFILE
            # here, but apparently *BSD and Solaris can fail upon connect()
            # or bind() with EADDRNOTAVAIL, so let's be safe
            self.skipTest("FD limit reached")
        try:
            s.register(rd, selectors.EVENT_READ)
            s.register(wr, selectors.EVENT_WRITE)
        except OSError as e:
            if e.errno == errno.ENOSPC:
                # this can be raised by epoll if we go over
                # fs.epoll.max_user_watches sysctl
                self.skipTest("FD limit reached")
            raise

    self.assertEqual(NUM_FDS // 2, len(s.select()))
Example #17
Source File: test_fd.py From landscape-client with GNU General Public License v2.0
def test_clean_fds_rlimit(self, close_mock):
    """
    L{clean_fds} cleans all non-stdio file descriptors up to the process
    limit for file descriptors.
    """
    with self.mock_getrlimit(10) as getrlimit_mock:
        clean_fds()
    calls = [call(i) for i in range(3, 10)]
    close_mock.assert_has_calls(calls, any_order=True)
    getrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE)
Example #18
Source File: test_memory.py From manticore with GNU Affero General Public License v3.0
def get_open_fds(self):
    fds = []
    # Probe every descriptor up to the soft limit value reported by
    # getrlimit. (resource.RLIMIT_NOFILE itself is just the resource
    # identifier, a small constant, not the limit value.)
    soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    for fd in range(3, soft_limit):
        try:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        except IOError:
            continue
        fds.append(fd)
    return fds

# python3's unittest does not have this function, so we need to implement it ourselves
Example #19
Source File: linux.py From manticore with GNU Affero General Public License v3.0
def sys_dup2(self, fd: int, newfd: int) -> int:
    """
    Duplicates an open fd to newfd. If newfd is open, it is first closed

    :param fd: the open file descriptor to duplicate.
    :param newfd: the file descriptor to alias the file described by fd.
    :return: newfd.
    """
    try:
        f = self._get_fdlike(fd)
    except FdError as e:
        logger.info(f"sys_dup2: fd ({fd}) is not open. Returning -{errorcode(e.err)}")
        return -e.err

    soft_max, hard_max = self._rlimits[resource.RLIMIT_NOFILE]
    if newfd >= soft_max:
        logger.info(
            f"sys_dup2: newfd ({newfd}) is above max descriptor table size ({soft_max})"
        )
        return -errno.EBADF

    if self._is_fd_open(newfd):
        self._close(newfd)

    # Install the duplicate at newfd, per dup2 semantics.
    self.fd_table.add_entry_at(f, newfd)

    logger.debug("sys_dup2(%d,%d) -> %d", fd, newfd, newfd)
    return newfd
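The example above implements dup2 semantics inside an emulated platform. In ordinary Python code the same operation is a single call to os.dup2, which closes newfd first if it is open. A small sketch redirecting stdout to a file (the filename is illustrative):

import os

log_fd = os.open("out.log", os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o644)
os.dup2(log_fd, 1)  # fd 1 (stdout) now refers to out.log
os.close(log_fd)    # the duplicate on fd 1 remains open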
Example #20
Source File: db_transfer.py From shadowsocks with Apache License 2.0
def thread_db(obj):
    import socket
    import time
    global db_instance
    timeout = 60
    socket.setdefaulttimeout(timeout)
    last_rows = []
    db_instance = obj()
    shell.log_shadowsocks_version()
    try:
        import resource
        logging.info(
            'current process RLIMIT_NOFILE resource: soft %d hard %d' %
            resource.getrlimit(resource.RLIMIT_NOFILE))
    except:
        pass
    try:
        while True:
            load_config()
            try:
                db_instance.push_db_all_user()
                rows = db_instance.pull_db_all_user()
                db_instance.del_server_out_of_bound_safe(last_rows, rows)
                db_instance.detect_text_ischanged = False
                db_instance.detect_hex_ischanged = False
                last_rows = rows
            except Exception as e:
                trace = traceback.format_exc()
                logging.error(trace)
                # logging.warn('db thread except:%s' % e)
            if db_instance.event.wait(60) or not db_instance.is_all_thread_alive():
                break
            if db_instance.has_stopped:
                break
    except KeyboardInterrupt as e:
        pass
    db_instance.del_servers()
    ServerPool.get_instance().stop()
    db_instance = None
Example #21
Source File: watchdog.py From landscape-client with GNU General Public License v2.0
def startService(self):
    Service.startService(self)
    bootstrap_list.bootstrap(data_path=self._config.data_path,
                             log_dir=self._config.log_dir)
    if self._config.clones > 0:
        # Let clones open an appropriate number of fds
        setrlimit(RLIMIT_NOFILE, (self._config.clones * 100,
                                  self._config.clones * 200))

        # Increase the timeout of AMP's MethodCalls.
        # XXX: we should find a better way to expose this knot, and
        # not set it globally on the class
        from landscape.lib.amp import MethodCallSender
        MethodCallSender.timeout = 300

        # Create clones log and data directories
        for i in range(self._config.clones):
            suffix = "-clone-%d" % i
            bootstrap_list.bootstrap(
                data_path=self._config.data_path + suffix,
                log_dir=self._config.log_dir + suffix)

    result = succeed(None)
    result.addCallback(lambda _: self.watchdog.check_running())

    def start_if_not_running(running_daemons):
        if running_daemons:
            error("ERROR: The following daemons are already running: %s"
                  % (", ".join(x.program for x in running_daemons)))
            self.exit_code = 1
            reactor.crash()  # so stopService isn't called.
            return
        self._daemonize()
        info("Watchdog watching for daemons.")
        return self.watchdog.start()

    def die(failure):
        log_failure(failure, "Unknown error occurred!")
        self.exit_code = 2
        reactor.crash()

    result.addCallback(start_if_not_running)
    result.addErrback(die)
    return result
Example #22
Source File: linux.py From manticore with GNU Affero General Public License v3.0
def __init__(
    self,
    program: Optional[str],
    argv: List[str] = [],
    envp: List[str] = [],
    disasm: str = "capstone",
    **kwargs,
):
    """
    Builds a Linux OS platform

    :param string program: The path to ELF binary
    :param string disasm: Disassembler to be used
    :param list argv: The argv array; not including binary.
    :param list envp: The ENV variables.
    """
    super().__init__(path=program, **kwargs)
    self.program = program
    self.clocks: int = 0
    self.fd_table: FdTable = FdTable()
    # A cache for keeping state when reading directories { fd: dent_iter }
    self._getdents_c: Dict[int, Any] = {}
    self._closed_files: List[FdLike] = []
    self.syscall_trace: List[Tuple[str, int, bytes]] = []
    # Many programs to support SLinux
    self.programs = program
    self.disasm = disasm
    self.envp = envp
    self.argv = argv
    self.stubs = SyscallStubs(parent=self)
    # dict of [int -> (int, int)] where tuple is (soft, hard) limits
    self._rlimits = {
        resource.RLIMIT_NOFILE: (256, 1024),
        resource.RLIMIT_STACK: (8192 * 1024, 0),
    }
    if program is not None:
        self.elf = ELFFile(open(program, "rb"))
        # FIXME (theo) self.arch is actually mode as initialized in the CPUs,
        # make things consistent and perhaps utilize a global mapping for this
        self.arch = {"x86": "i386", "x64": "amd64", "ARM": "armv7",
                     "AArch64": "aarch64"}[self.elf.get_machine_arch()]

        self._init_cpu(self.arch)
        self._init_std_fds()
        self._execve(program, argv, envp)
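One detail worth flagging for readers reusing this constructor pattern: argv and envp use mutable default arguments ([]), which Python evaluates once and shares across all calls that omit them. That is harmless only while the lists are never mutated; a common defensive variant (a sketch, not the project's code) uses None as the default:

def __init__(self, program, argv=None, envp=None):
    argv = [] if argv is None else argv
    envp = [] if envp is None else envp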
Example #23
Source File: web_transfer.py From shadowsocks with Apache License 2.0
def thread_db(obj):
    import socket
    import time
    import webapi_utils
    global db_instance
    global webapi
    timeout = 60
    socket.setdefaulttimeout(timeout)
    last_rows = []
    db_instance = obj()
    webapi = webapi_utils.WebApi()
    shell.log_shadowsocks_version()
    try:
        import resource
        logging.info(
            'current process RLIMIT_NOFILE resource: soft %d hard %d' %
            resource.getrlimit(resource.RLIMIT_NOFILE))
    except:
        pass
    try:
        while True:
            load_config()
            try:
                ping = webapi.getApi('func/ping')
                if ping is None:
                    logging.error(
                        'something wrong with your http api, please check your config and website status and try again later.')
                else:
                    db_instance.push_db_all_user()
                    rows = db_instance.pull_db_all_user()
                    db_instance.del_server_out_of_bound_safe(
                        last_rows, rows)
                    last_rows = rows
            except Exception as e:
                trace = traceback.format_exc()
                logging.error(trace)
                # logging.warn('db thread except:%s' % e)
            if db_instance.event.wait(60) or not db_instance.is_all_thread_alive():
                break
            if db_instance.has_stopped:
                break
    except KeyboardInterrupt as e:
        pass
    db_instance.del_servers()
    ServerPool.get_instance().stop()
    db_instance = None