Python twisted.internet.error.TCPTimedOutError() Examples
The following are 9 code examples of twisted.internet.error.TCPTimedOutError().
You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module twisted.internet.error.
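TCPTimedOutError is the twisted.internet.error.ConnectError subclass that Twisted raises when a TCP connection attempt fails with errno ETIMEDOUT (see the test cases in Examples #5 and #6 below). As a quick orientation, here is a minimal sketch of how error.getConnectError performs that mapping; the snippet is illustrative and not taken from any of the projects below:

import errno
from twisted.internet import error

# getConnectError() maps the errno from a failed connect() onto a
# specific ConnectError subclass
exc = error.getConnectError((errno.ETIMEDOUT, "connection timed out"))
print(type(exc).__name__)  # -> TCPTimedOutError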
Example #1
Source File: aiqiyi_spider.py From video_url_crawler_demo with GNU General Public License v3.0
def errback_httpbin(self, failure):
    # log all failures
    self.logger.error(repr(failure))

    # in case you want to do something special for some errors,
    # you may need the failure's type:
    if failure.check(HttpError):
        # these exceptions come from HttpError spider middleware
        # you can get the non-200 response
        response = failure.value.response
        self.logger.error('HttpError on %s', response.url)
    elif failure.check(DNSLookupError):
        # this is the original request
        request = failure.request
        self.logger.error('DNSLookupError on %s', request.url)
    elif failure.check(TimeoutError, TCPTimedOutError):
        request = failure.request
        self.logger.error('TimeoutError on %s', request.url)
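Failures such as TCPTimedOutError only reach errback_httpbin if the request is created with an errback. A minimal sketch of that wiring (the spider name and URL are illustrative, not taken from video_url_crawler_demo):

import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError, TimeoutError, TCPTimedOutError

class ErrbackDemoSpider(scrapy.Spider):
    name = "errback_demo"  # hypothetical spider name

    def start_requests(self):
        # errback= routes download failures (DNS errors, timeouts,
        # non-200 responses via the HttpError middleware) into
        # errback_httpbin instead of silently dropping them
        yield scrapy.Request("https://example.com/",
                             callback=self.parse,
                             errback=self.errback_httpbin)

    def parse(self, response):
        self.logger.info("Got successful response from %s", response.url)

    # errback_httpbin is defined exactly as in the example above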
Example #2
Source File: test_errorback.py From poi_spider with Apache License 2.0
def errback_httpbin(self, failure):
    # log all failures
    self.logger.error(repr(failure))

    # in case you want to do something special for some errors,
    # you may need the failure's type:
    if failure.check(HttpError):
        print("HttpError occurred")
        # these exceptions come from HttpError spider middleware
        # you can get the non-200 response
        response = failure.value.response
        self.logger.error('HttpError on %s', response.url)
    elif failure.check(DNSLookupError):
        # this is the original request
        request = failure.request
        self.logger.error('DNSLookupError on %s', request.url)
    elif failure.check(TimeoutError, TCPTimedOutError):
        request = failure.request
        self.logger.error('TimeoutError on %s', request.url)
Example #3
Source File: middlewares.py From SourceCodeOfBook with MIT License
def process_exception(self, request, exception, spider):
    if spider.name == 'exceptionSpider' and isinstance(exception, TCPTimedOutError):
        self.remove_borken_proxy(request.meta['proxy'])
        return request.copy()
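Returning request.copy() from process_exception asks Scrapy to reschedule the request, so the retry happens after the broken proxy has been removed. For this hook to run at all, the middleware must be enabled in the project settings; a minimal sketch, assuming a hypothetical module path and priority:

# settings.py -- module path and priority value are hypothetical
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.ProxyExceptionMiddleware': 543,
}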
Example #4
Source File: socks5.py From Safejumper-for-Desktop with GNU General Public License v2.0
def handleCmdConnectFailure(self, failure):
    log.error("CMD CONNECT: %s" % failure.getErrorMessage())

    # Map common twisted errors to SOCKS error codes
    if failure.type == error.NoRouteError:
        self.sendReply(SOCKSv5Reply.NetworkUnreachable)
    elif failure.type == error.ConnectionRefusedError:
        self.sendReply(SOCKSv5Reply.ConnectionRefused)
    elif failure.type == error.TCPTimedOutError or failure.type == error.TimeoutError:
        self.sendReply(SOCKSv5Reply.TTLExpired)
    elif failure.type == error.UnsupportedAddressFamily:
        self.sendReply(SOCKSv5Reply.AddressTypeNotSupported)
    elif failure.type == error.ConnectError:
        # Twisted doesn't have an exception defined for EHOSTUNREACH,
        # so the failure is a ConnectError.  Try to catch this case
        # and send a better reply, but fall back to a GeneralFailure.
        reply = SOCKSv5Reply.GeneralFailure
        try:
            import errno
            if hasattr(errno, "EHOSTUNREACH"):
                if failure.value.osError == errno.EHOSTUNREACH:
                    reply = SOCKSv5Reply.HostUnreachable
            if hasattr(errno, "WSAEHOSTUNREACH"):
                if failure.value.osError == errno.WSAEHOSTUNREACH:
                    reply = SOCKSv5Reply.HostUnreachable
        except Exception:
            pass
        self.sendReply(reply)
    else:
        self.sendReply(SOCKSv5Reply.GeneralFailure)
    failure.trap(error.NoRouteError, error.ConnectionRefusedError,
                 error.TCPTimedOutError, error.TimeoutError,
                 error.UnsupportedAddressFamily, error.ConnectError)
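Two Failure APIs appear across these examples: failure.check(...) returns the first matching error class (or None), and failure.trap(...) performs the same match but re-raises the wrapped exception when nothing matches; both match against the class hierarchy. A minimal standalone sketch, independent of the Safejumper code:

from twisted.internet import error
from twisted.python.failure import Failure

f = Failure(error.TCPTimedOutError())
# check() returns the matching class from the argument list, or None
assert f.check(error.TimeoutError, error.TCPTimedOutError) is error.TCPTimedOutError
# trap() accepts base classes too: TCPTimedOutError subclasses ConnectError;
# if no type matched, trap() would re-raise the wrapped exception
f.trap(error.ConnectError)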
Example #5
Source File: test_error.py From Safejumper-for-Desktop with GNU General Public License v2.0
def test_errno(self):
    """
    L{error.getConnectError} converts based on errno for C{socket.error}.
    """
    self.assertErrnoException(errno.ENETUNREACH, error.NoRouteError)
    self.assertErrnoException(errno.ECONNREFUSED,
                              error.ConnectionRefusedError)
    self.assertErrnoException(errno.ETIMEDOUT, error.TCPTimedOutError)
    if platformType == "win32":
        self.assertErrnoException(errno.WSAECONNREFUSED,
                                  error.ConnectionRefusedError)
        self.assertErrnoException(errno.WSAENETUNREACH, error.NoRouteError)
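assertErrnoException is a helper defined elsewhere in Twisted's test_error.py. Roughly, it passes an (errno, message) pair to error.getConnectError and asserts on the class of the result; a hedged reconstruction of the gist, not the exact Twisted source:

def assertErrnoException(self, errnoValue, expectedClass):
    # Sketch only: feed an errno to getConnectError and check the
    # class of the resulting exception
    result = error.getConnectError((errnoValue, "connection failed"))
    self.assertIsInstance(result, expectedClass)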
Example #6
Source File: test_error.py From learn_python3_spider with MIT License
def test_errno(self):
    """
    L{error.getConnectError} converts based on errno for C{socket.error}.
    """
    self.assertErrnoException(errno.ENETUNREACH, error.NoRouteError)
    self.assertErrnoException(errno.ECONNREFUSED,
                              error.ConnectionRefusedError)
    self.assertErrnoException(errno.ETIMEDOUT, error.TCPTimedOutError)
    if platformType == "win32":
        self.assertErrnoException(errno.WSAECONNREFUSED,
                                  error.ConnectionRefusedError)
        self.assertErrnoException(errno.WSAENETUNREACH, error.NoRouteError)
Example #7
Source File: textspider.py From ARGUS with GNU General Public License v3.0
def errorback(self, failure):
    loader = ItemLoader(item=Collector())
    if failure.check(HttpError):
        response = failure.value.response
        loader.add_value("dl_slot", response.request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("error", response.status)
        loader.add_value("ID", response.request.meta["ID"])
        yield loader.load_item()
    elif failure.check(DNSLookupError):
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("error", "DNS")
        loader.add_value("ID", request.meta["ID"])
        yield loader.load_item()
    elif failure.check(TimeoutError, TCPTimedOutError):
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("error", "Timeout")
        loader.add_value("ID", request.meta["ID"])
        yield loader.load_item()
    else:
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("error", "other")
        loader.add_value("ID", request.meta["ID"])
        yield loader.load_item()
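The four branches above differ only in where the request object comes from and in the value written to the error field. A hedged refactor sketch that keeps the same fields and output (Collector and the field names are taken from the example itself):

def errorback(self, failure):
    if failure.check(HttpError):
        response = failure.value.response
        request, error_value = response.request, response.status
    elif failure.check(DNSLookupError):
        request, error_value = failure.request, "DNS"
    elif failure.check(TimeoutError, TCPTimedOutError):
        request, error_value = failure.request, "Timeout"
    else:
        request, error_value = failure.request, "other"

    # shared loader population, identical for every failure type
    loader = ItemLoader(item=Collector())
    loader.add_value("dl_slot", request.meta.get('download_slot'))
    for field in ("start_page", "scraped_urls", "scraped_text"):
        loader.add_value(field, "")
    loader.add_value("redirect", [None])
    loader.add_value("error", error_value)
    loader.add_value("ID", request.meta["ID"])
    yield loader.load_item()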
Example #8
Source File: linkspider.py From ARGUS with GNU General Public License v3.0
def errorback(self, failure):
    loader = ItemLoader(item=LinkCollector())
    if failure.check(HttpError):
        response = failure.value.response
        loader.add_value("dl_slot", response.request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("error", response.status)
        loader.add_value("ID", response.request.meta["ID"])
        loader.add_value("links", "")
        loader.add_value("alias", "")
        yield loader.load_item()
    elif failure.check(DNSLookupError):
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("error", "DNS")
        loader.add_value("ID", request.meta["ID"])
        loader.add_value("links", "")
        loader.add_value("alias", "")
        yield loader.load_item()
    elif failure.check(TimeoutError, TCPTimedOutError):
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("error", "Timeout")
        loader.add_value("ID", request.meta["ID"])
        loader.add_value("links", "")
        loader.add_value("alias", "")
        yield loader.load_item()
    else:
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("error", "other")
        loader.add_value("ID", request.meta["ID"])
        loader.add_value("links", "")
        loader.add_value("alias", "")
        yield loader.load_item()
Example #9
Source File: textspider.py From ARGUS with GNU General Public License v3.0
def errorback(self, failure):
    loader = ItemLoader(item=Collector())
    if failure.check(HttpError):
        response = failure.value.response
        loader.add_value("dl_slot", response.request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("title", "")
        loader.add_value("description", "")
        loader.add_value("keywords", "")
        loader.add_value("error", response.status)
        loader.add_value("ID", response.request.meta["ID"])
        yield loader.load_item()
    elif failure.check(DNSLookupError):
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("title", "")
        loader.add_value("description", "")
        loader.add_value("keywords", "")
        loader.add_value("error", "DNS")
        loader.add_value("ID", request.meta["ID"])
        yield loader.load_item()
    elif failure.check(TimeoutError, TCPTimedOutError):
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("title", "")
        loader.add_value("description", "")
        loader.add_value("keywords", "")
        loader.add_value("error", "Timeout")
        loader.add_value("ID", request.meta["ID"])
        yield loader.load_item()
    else:
        request = failure.request
        loader.add_value("dl_slot", request.meta.get('download_slot'))
        loader.add_value("start_page", "")
        loader.add_value("scraped_urls", "")
        loader.add_value("redirect", [None])
        loader.add_value("scraped_text", "")
        loader.add_value("title", "")
        loader.add_value("description", "")
        loader.add_value("keywords", "")
        loader.add_value("error", "other")
        loader.add_value("ID", request.meta["ID"])
        yield loader.load_item()