Python contextlib.suppress() Examples
The following are 30 code examples of contextlib.suppress(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module contextlib, or try the search function.
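Before the project examples, here is a minimal standalone sketch (not drawn from any of the projects below; the file name is just a placeholder) of what contextlib.suppress() does: it returns a context manager that silently swallows only the exception types you list, as a terser alternative to a try/except/pass block, while any other exception still propagates.

import contextlib
import os

# Equivalent to: try: os.remove(...) / except FileNotFoundError: pass
with contextlib.suppress(FileNotFoundError):
    os.remove('placeholder.tmp')  # no error even if the file is already gone
# Any other exception type (e.g. PermissionError) would still be raised.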
Example #1
Source File: _socketcan.py From pyuavcan with MIT License | 9 votes |
def _make_socket(iface_name: str, can_fd: bool) -> socket.SocketType:
    s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
    try:
        s.bind((iface_name,))
        s.setsockopt(socket.SOL_SOCKET, _SO_TIMESTAMP, 1)  # timestamping
        if can_fd:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FD_FRAMES, 1)
        s.setblocking(False)
        if 0 != s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
            raise OSError('Could not configure the socket: getsockopt(SOL_SOCKET, SO_ERROR) != 0')
    except BaseException:
        with contextlib.suppress(Exception):
            s.close()
        raise
    return s
Example #2
Source File: svr_threads.py From Pyro5 with MIT License | 7 votes |
def close(self):
    if self.housekeeper:
        self.housekeeper.stop.set()
        self.housekeeper.join()
        self.housekeeper = None
    if self.sock:
        with contextlib.suppress(socket.error, OSError):
            sockname = self.sock.getsockname()
        with contextlib.suppress(Exception):
            self.sock.close()
            if type(sockname) is str:
                # it was a Unix domain socket, remove it from the filesystem
                if os.path.exists(sockname):
                    os.remove(sockname)
        self.sock = None
    self.pool.close()
Example #3
Source File: client.py From tf-yarn with Apache License 2.0 | 6 votes |
def _aggregate_events(
    kv: skein.kv.KeyValueStore,
    events: Dict[str, Dict[str, str]]
) -> None:
    """
    Aggregate events from all dispatched tasks.

    The lifecycle of a task consists of three stages:
    * init which carries the reserved socket address,
    * start with no payload, and
    * stop with an optional formatted exception.
    """
    # ``ConnectionError`` indicates that the app has finished and
    # the AM is down.
    queue = kv.events(event_type="PUT")
    with suppress(skein.exceptions.ConnectionError), queue:
        for evt in queue:
            if "/" in evt.key:
                task, stage = evt.key.rsplit("/", 1)
                events[task][stage] = evt.result.value.decode()
Example #4
Source File: test_reloader.py From sanic with MIT License | 6 votes |
def test_reloader_live(runargs, mode):
    with TemporaryDirectory() as tmpdir:
        filename = os.path.join(tmpdir, "reloader.py")
        text = write_app(filename, **runargs)
        proc = Popen(argv[mode], cwd=tmpdir, stdout=PIPE, creationflags=flags)
        try:
            timeout = Timer(5, terminate, [proc])
            timeout.start()
            # Python apparently keeps using the old source sometimes if
            # we don't sleep before rewrite (pycache timestamp problem?)
            sleep(1)
            line = scanner(proc)
            assert text in next(line)
            # Edit source code and try again
            text = write_app(filename, **runargs)
            assert text in next(line)
        finally:
            timeout.cancel()
            terminate(proc)
            with suppress(TimeoutExpired):
                proc.wait(timeout=3)
Example #5
Source File: core.py From django-more with BSD 3-Clause "New" or "Revised" License | 6 votes |
def cls(self, target, source=None):
    if isinstance(target, str):
        target = resolve(target, package=self.target)
    if self.source and source is None:
        with suppress(ImportError):
            source_str = '{mod}.{cls}'.format(
                mod=target.__module__.replace('.', self.module_sep),
                cls=target.__name__)
            source = resolve(source_str, package=self.source)
        if not source:
            with suppress(AttributeError):
                source = getattr(self.source, target.__name__)
    elif isinstance(source, str):
        source = resolve(source, package=self.source)
    if isinstance(target, type):
        return PatchClass(target, source)
    raise TypeError('Must be a valid class or class name')
Example #6
Source File: socketutil.py From Pyro5 with MIT License | 6 votes |
def bind_unused_port(sock: socket.socket,
                     host: Union[str, ipaddress.IPv4Address, ipaddress.IPv6Address] = 'localhost') -> int:
    """Bind the socket to a free port and return the port number.
    This code is based on the code in the stdlib's test.test_support module."""
    if sock.family in (socket.AF_INET, socket.AF_INET6) and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, "SO_EXCLUSIVEADDRUSE"):
            with contextlib.suppress(socket.error):
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
    if not isinstance(host, str):
        host = str(host)
    if sock.family == socket.AF_INET:
        if host == 'localhost':
            sock.bind(('127.0.0.1', 0))
        else:
            sock.bind((host, 0))
    elif sock.family == socket.AF_INET6:
        if host == 'localhost':
            sock.bind(('::1', 0, 0, 0))
        else:
            sock.bind((host, 0, 0, 0))
    else:
        raise CommunicationError("unsupported socket family: " + str(sock.family))
    return sock.getsockname()[1]
Example #7
Source File: client.py From tf-yarn with Apache License 2.0 | 6 votes |
def _shutdown_on_exception(app: skein.ApplicationClient):
    # Ensure SIGINT is not masked to enable kill on C-c.
    import signal
    signal.signal(signal.SIGINT, signal.default_int_handler)

    try:
        yield
    except (KeyboardInterrupt, SystemExit):
        with suppress(SkeinError):
            app.shutdown(FinalStatus.KILLED)
        logger.error("Application killed on user request")
    except Exception:
        with suppress(SkeinError):
            app.shutdown(FinalStatus.FAILED)
        logger.exception("Application shutdown due to an exception")
        raise
Example #8
Source File: client.py From Pyro5 with MIT License | 6 votes |
def close(self):
    if self.proxy and self.proxy._pyroConnection is not None:
        if self.pyroseq == self.proxy._pyroSeq:
            # we're still in sync, it's okay to use the same proxy to close this stream
            self.proxy._pyroInvoke("close_stream", [self.streamId], {},
                                   flags=protocol.FLAGS_ONEWAY, objectId=core.DAEMON_NAME)
        else:
            # The proxy's sequence number has diverged.
            # One of the reasons this can happen is because this call is being done from python's GC where
            # it decides to gc old iterator objects *during a new call on the proxy*.
            # If we use the same proxy and do a call in between, the other call on the proxy
            # will get an out of sync seq and crash!
            # We create a temporary second proxy to call close_stream on.
            # This is inefficient, but avoids the problem.
            with contextlib.suppress(errors.CommunicationError):
                with self.proxy.__copy__() as closingProxy:
                    closingProxy._pyroInvoke("close_stream", [self.streamId], {},
                                             flags=protocol.FLAGS_ONEWAY, objectId=core.DAEMON_NAME)
    self.proxy = None
Example #9
Source File: blender_renderer.py From 3D-R2N2 with MIT License | 6 votes |
def render_voxel(self, pred, thresh=0.4,
                 image_path=os.path.join(cfg.RENDERING.BLENDER_TMP_DIR, 'tmp.png')):
    # Cleanup the scene
    self.clearModel()
    out_f = os.path.join(cfg.RENDERING.BLENDER_TMP_DIR, 'tmp.obj')
    occupancy = pred > thresh
    vertices, faces = voxel2mesh(occupancy)
    with contextlib.suppress(IOError):
        os.remove(out_f)
    write_obj(out_f, vertices, faces)

    # Load the obj
    bpy.ops.import_scene.obj(filepath=out_f)
    bpy.context.scene.render.filepath = image_path
    bpy.ops.render.render(write_still=True)  # save straight to file

    im = np.array(Image.open(image_path))  # read the image
    # Last channel is the alpha channel (transparency)
    return im[:, :, :3], im[:, :, 3]
Example #10
Source File: svr_threads.py From Pyro5 with MIT License | 6 votes |
def events(self, eventsockets):
    """used for external event loops: handle events that occur on one of the sockets of this server"""
    # we only react on events on our own server socket.
    # all other (client) sockets are owned by their individual threads.
    assert self.sock in eventsockets
    with contextlib.suppress(socket.timeout):
        # just continue the loop on a timeout on accept
        events = self._selector.select(config.POLLTIMEOUT)
        if not events:
            return
        csock, caddr = self.sock.accept()
        if self.shutting_down:
            csock.close()
            return
        if hasattr(csock, "getpeercert"):
            log.debug("connected %s - SSL", caddr)
        else:
            log.debug("connected %s - unencrypted", caddr)
        if config.COMMTIMEOUT:
            csock.settimeout(config.COMMTIMEOUT)
        job = ClientConnectionJob(csock, caddr, self.daemon)
        try:
            self.pool.process(job)
        except NoFreeWorkersError:
            job.denyConnection("no free workers, increase server threadpool size")
Example #11
Source File: test_socketutil.py From Pyro5 with MIT License | 6 votes |
def testCreateUnboundSockets6(self):
    if not has_ipv6:
        pytest.skip("no ipv6 capability")
    s = socketutil.create_socket(ipv6=True)
    assert socket.AF_INET6 == s.family
    bs = socketutil.create_bc_socket(ipv6=True)
    assert socket.AF_INET6 == bs.family
    with contextlib.suppress(socket.error):
        host, port, _, _ = s.getsockname()  # can either fail with socket.error or return (host,0)
        assert 0 == port
    with contextlib.suppress(socket.error):
        host, port, _, _ = bs.getsockname()  # can either fail with socket.error or return (host,0)
        assert 0 == port
    s.close()
    bs.close()
Example #12
Source File: _pick_info.py From mplcursors with MIT License | 6 votes |
def _register_scatter():
    """
    Patch `PathCollection` and `scatter` to register their return values.

    This registration allows us to distinguish `PathCollection`s created by
    `Axes.scatter`, which should use point-like picking, from others, which
    should use path-like picking.  The former is more common, so we store the
    latter instead; this also lets us guess the type better if this module is
    imported late.
    """

    @functools.wraps(PathCollection.__init__)
    def __init__(self, *args, **kwargs):
        _nonscatter_pathcollections.add(self)
        return __init__.__wrapped__(self, *args, **kwargs)
    PathCollection.__init__ = __init__

    @functools.wraps(Axes.scatter)
    def scatter(*args, **kwargs):
        paths = scatter.__wrapped__(*args, **kwargs)
        with suppress(KeyError):
            _nonscatter_pathcollections.remove(paths)
        return paths
    Axes.scatter = scatter
Example #13
Source File: test_aiohttp.py From sentry-python with BSD 2-Clause "Simplified" License | 6 votes |
async def test_cancelled_error_not_captured(
    sentry_init, aiohttp_client, loop, capture_events
):
    sentry_init(integrations=[AioHttpIntegration()])

    async def hello(request):
        raise asyncio.CancelledError()

    app = web.Application()
    app.router.add_get("/", hello)

    events = capture_events()
    client = await aiohttp_client(app)

    with suppress(ServerDisconnectedError):
        # Intended `aiohttp` interaction: server will disconnect if it
        # encounters `asyncio.CancelledError`
        await client.get("/")
    assert not events
Example #14
Source File: resources.py From cloud-inquisitor with Apache License 2.0 | 6 votes |
def update(self, data, properties):
    updated = self.set_property('location', properties['location'])
    updated |= self.set_property('creation_date', data.creation_date)
    updated |= self.set_property('bucket_policy', properties['bucket_policy'])
    updated |= self.set_property('website_enabled', properties['website_enabled'])
    updated |= self.set_property('metrics', properties['metrics'])

    with suppress(ClientError):
        tags = {t['Key']: t['Value'] for t in data.Tagging().tag_set}
        existing_tags = {x.key: x for x in self.tags}

        # Check for new tags
        for key, value in list(tags.items()):
            updated |= self.set_tag(key, value)

        # Check for updated or removed tags
        for key in list(existing_tags.keys()):
            if key not in tags:
                updated |= self.delete_tag(key)

    return updated
Example #15
Source File: __init__.py From pmdarima with MIT License | 5 votes |
def cythonize_extensions(top_path, config):
    """Check that a recent Cython is available and cythonize extensions"""
    _check_cython_version()
    from Cython.Build import cythonize

    # Fast fail before cythonization if compiler fails compiling basic test
    # code even without OpenMP
    basic_check_build()

    # check simple compilation with OpenMP. If it fails scikit-learn will be
    # built without OpenMP and the test test_openmp_supported in the test suite
    # will fail.
    # `check_openmp_support` compiles a small test program to see if the
    # compilers are properly configured to build with OpenMP. This is expensive
    # and we only want to call this function once.
    # The result of this check is cached as a private attribute on the sklearn
    # module (only at build-time) to be used twice:
    # - First to set the value of SKLEARN_OPENMP_PARALLELISM_ENABLED, the
    #   cython build-time variable passed to the cythonize() call.
    # - Then in the build_ext subclass defined in the top-level setup.py file
    #   to actually build the compiled extensions with OpenMP flags if needed.
    n_jobs = 1
    with contextlib.suppress(ImportError):
        import joblib
        if LooseVersion(joblib.__version__) > LooseVersion("0.13.0"):
            # earlier joblib versions don't account for CPU affinity
            # constraints, and may over-estimate the number of available
            # CPU particularly in CI (cf loky#114)
            n_jobs = joblib.cpu_count()

    config.ext_modules = cythonize(
        config.ext_modules,
        nthreads=n_jobs,
        compiler_directives={'language_level': 3})
Example #16
Source File: containerized_tool.py From benchexec with Apache License 2.0 | 5 votes |
def __init__(self, tool_module, config):
    """Load tool-info module in subprocess.
    @param tool_module: The name of the module to load.
        Needs to define class named Tool.
    @param config: A config object suitable for
        benchexec.containerexecutor.handle_basic_container_args()
    """
    # We use multiprocessing.Pool as an easy way for RPC with another process.
    self._pool = multiprocessing.Pool(1, _init_worker_process)

    container_options = containerexecutor.handle_basic_container_args(config)
    temp_dir = tempfile.mkdtemp(prefix="Benchexec_tool_info_container_")

    # Call function that loads tool module and returns its doc
    try:
        self.__doc__ = self._pool.apply(
            _init_container_and_load_tool,
            [tool_module, temp_dir],
            container_options,
        )
    except BaseException as e:
        self._pool.terminate()
        raise e
    finally:
        # Outside the container, the temp_dir is just an empty directory, because
        # the tmpfs mount is only visible inside. We can remove it immediately.
        with contextlib.suppress(OSError):
            os.rmdir(temp_dir)
Example #17
Source File: _mplcursors.py From mplcursors with MIT License | 5 votes |
def remove(self):
    """
    Remove a cursor.

    Remove all `Selection`\\s, disconnect all callbacks, and allow the
    cursor to be garbage collected.
    """
    for disconnectors in self._disconnectors:
        disconnectors()
    for sel in self.selections:
        self.remove_selection(sel)
    for s in type(self)._keep_alive.values():
        with suppress(KeyError):
            s.remove(self)
Example #18
Source File: core.py From django-more with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __setitem__(self, key, value):
    # Strip inbuilt decorators
    if isinstance(value, (classmethod, staticmethod)):
        value = value.__func__
    with suppress(AttributeError):
        key = key.__code__
    return super().__setitem__(id(key), value)
Example #19
Source File: core.py From django-more with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __contains__(self, key):
    with suppress(AttributeError):
        key = key.__code__
    return super().__contains__(id(key))
Example #20
Source File: utils.py From zoo with Apache License 2.0 | 5 votes |
def get_distribution_scope(batch_size):
    if num_gpus() > 1:
        strategy = tf.distribute.MirroredStrategy()
        assert (
            batch_size % strategy.num_replicas_in_sync == 0
        ), f"Batch size {batch_size} cannot be divided onto {num_gpus()} GPUs"
        distribution_scope = strategy.scope
    else:
        if sys.version_info >= (3, 7):
            distribution_scope = contextlib.nullcontext
        else:
            distribution_scope = contextlib.suppress

    return distribution_scope()
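A side note on the fallback in the example above (an observation, not part of the zoo source): contextlib.suppress() called with no exception types suppresses nothing, so it doubles as a do-nothing context manager on Python versions older than 3.7, where contextlib.nullcontext is not available. Example #23 below relies on the same trick. A minimal sketch of the portable pattern:

import contextlib
import sys

# Pick a no-op context manager that works on both old and new Pythons.
if sys.version_info >= (3, 7):
    null_scope = contextlib.nullcontext
else:
    null_scope = contextlib.suppress  # with no arguments it suppresses nothing

with null_scope():
    print("body runs normally; nothing is suppressed")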
Example #21
Source File: core.py From django-more with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __getitem__(self, key):
    with suppress(AttributeError):
        key = key.__code__
    with suppress(KeyError):
        return super().__getitem__(id(key))
    raise RuntimeError('Patched func cannot find its predecessor')
Example #22
Source File: socketutil.py From Pyro5 with MIT License | 5 votes |
def close(self) -> None:
    if self.keep_open:
        return
    with contextlib.suppress(Exception):
        self.sock.shutdown(socket.SHUT_RDWR)
    with contextlib.suppress(Exception):
        self.sock.close()
    self.pyroInstances = {}  # release the session instances
    for rsc in self.tracked_resources:
        with contextlib.suppress(Exception):
            rsc.close()  # it is assumed a 'resource' has a close method.
    self.tracked_resources.clear()
Example #23
Source File: core.py From lm-human-preferences with MIT License | 5 votes |
def variables_on_gpu():
    """Prevent variables from accidentally being placed on the CPU.

    This dodges an obscure bug in tf.train.init_from_checkpoint.
    """
    if _our_gpu() is None:
        return contextlib.suppress()
    def device(op):
        return '/gpu:0' if op.type == 'VarHandleOp' else ''
    return tf.device(device)
Example #24
Source File: connection.py From aiozk with MIT License | 5 votes |
async def close(self, timeout):
    if self.closing:
        return
    self.closing = True
    if self.read_loop_task:
        self.read_loop_task.cancel()
        with suppress(asyncio.CancelledError):
            await self.read_loop_task
    if self.pending or (self.pending_specials and self.pending_specials != {None: []}):
        log.warning('Pendings: {}; specials: {}'.format(self.pending, self.pending_specials))
    try:
        # await list(pending_with_timeouts)
        self.abort(exception=exc.TimeoutError)
        # wlist = list(self.drain_all_pending())
        # log.warning('Wait for list: {} {}'.format(wlist, self.pending))
        # if len(wlist) > 0:
        #     await asyncio.wait(wlist, timeout=timeout)
    except asyncio.TimeoutError:
        log.warning('ABORT Timeout')
        await self.abort(exception=exc.TimeoutError)
    except Exception as e:
        log.exception('in close: {}'.format(e))
        raise e
    finally:
        log.debug('Closing writer')
        self.writer.close()
        log.debug('Writer closed')
Example #25
Source File: googleconnector.py From resolwe with Apache License 2.0 | 5 votes |
def delete(self, url, urls):
    """Remove objects."""
    # At most 1000 objects can be deleted at the same time.
    max_chunk_length = 1000
    for i in range(0, len(urls), max_chunk_length):
        with suppress(NotFound):
            next_chunk = urls[i : i + max_chunk_length]
            with self.client.batch():
                for delete_url in next_chunk:
                    blob = self.bucket.blob(
                        os.fspath(self.base_path / url / delete_url)
                    )
                    blob.delete()
Example #26
Source File: apps.py From resolwe with Apache License 2.0 | 5 votes |
def _check_connector_settings(self):
    """Validate the storage connector settings in the django config.

    When there exists a section that does not match any known storage
    connector then error is logged.
    """
    for connector_name, connector_settings in STORAGE_CONNECTORS.items():
        if connector_name not in connectors:
            full_class_name = connector_settings.get("connector")
            class_exists = False
            is_subclass = False
            with suppress(Exception):
                module_name, class_name = full_class_name.rsplit(".", 1)
                module = import_module(module_name)
                class_exists = hasattr(module, class_name)
                if class_exists:
                    is_subclass = issubclass(
                        getattr(module, class_name), BaseStorageConnector
                    )
            message = "Connector named {} using class {} is not registered.".format(
                connector_name, full_class_name
            )
            if not class_exists:
                message += " Class does not exist."
            elif not is_subclass:
                message += " Class is not a subclass of BaseStorageConnector."
            logger.warning(message)
Example #27
Source File: test_socketutil.py From Pyro5 with MIT License | 5 votes |
def testCreateUnboundSockets(self):
    s = socketutil.create_socket()
    assert socket.AF_INET == s.family
    bs = socketutil.create_bc_socket()
    assert socket.AF_INET == bs.family
    with contextlib.suppress(socket.error):
        host, port = s.getsockname()  # can either fail with socket.error or return (host,0)
        assert 0 == port
    with contextlib.suppress(socket.error):
        host, port = bs.getsockname()  # can either fail with socket.error or return (host,0)
        assert 0 == port
    s.close()
    bs.close()
Example #28
Source File: nameserver.py From Pyro5 with MIT License | 5 votes |
def close(self):
    log.debug("ns broadcast server closing")
    self.running = False
    with contextlib.suppress(OSError, socket.error):
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
Example #29
Source File: socketutil.py From Pyro5 with MIT License | 5 votes |
def interrupt_socket(address: Tuple[str, int]) -> None:
    """bit of a hack to trigger a blocking server to get out of the loop, useful at clean shutdowns"""
    with contextlib.suppress(socket.error):
        sock = create_socket(connect=address, keepalive=False, timeout=-1)
        with contextlib.suppress(socket.error, AttributeError):
            sock.sendall(b"!" * 16)
        with contextlib.suppress(OSError, socket.error):
            sock.shutdown(socket.SHUT_RDWR)
        sock.close()
Example #30
Source File: socketutil.py From Pyro5 with MIT License | 5 votes |
def set_keepalive(sock: socket.socket) -> None:
    """sets the SO_KEEPALIVE option on the socket, if possible."""
    with contextlib.suppress(Exception):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)