Python urllib2.URLError() Examples
The following are 30 code examples of urllib2.URLError().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the
module urllib2, or try the search function.
Example #1
Source File: can_haz_image.py From macops with Apache License 2.0 | 7 votes |
def DownloadFile(self, fileurl, dlfile): """Downloads a given file to a given path/filename. Args: fileurl: String with URL of file to download. dlfile: String with path of file to be written to. Raises: OSError: If file cannot be opened/written to, function raises OSError. URLError: If URL cannot be opened, fucntion raises URLError. """ if not os.path.isfile(dlfile) or dlfile == TMPINDEX: print 'Downloading %s ...' % fileurl file_to_dl = urllib2.urlopen(fileurl) tmpfile = open(dlfile, 'wb') shutil.copyfileobj(file_to_dl, tmpfile) else: print '%s exists' % dlfile
Example #2
Source File: api_server.py From browserscope with Apache License 2.0 | 6 votes |
def Quit(self, timeout=5.0):
    """Causes the API Server process to exit.

    Args:
      timeout: The maximum number of seconds to wait for an orderly
        shutdown before forceably killing the process.
    """
    assert self._process, 'server was not started'
    if self._process.poll() is not None:
        return  # process already exited; nothing to do

    try:
        # Ask the server to shut itself down via its quit endpoint.
        urllib2.urlopen(self.url + QUIT_PATH)
    except urllib2.URLError:
        pass  # server may already be unreachable

    # Poll until the process exits or the grace period runs out.
    deadline = time.time() + timeout
    while time.time() < deadline and self._process.poll() is None:
        time.sleep(0.2)

    if self._process.returncode is None:
        logging.warning('api_server did not quit cleanly, killing')
        self._process.kill()
Example #3
Source File: utils.py From script.module.inputstreamhelper with MIT License | 6 votes |
def _http_request(url, headers=None, time_out=10):
    """Perform an HTTP request and return request"""
    log(0, 'Request URL: {url}', url=url)

    try:
        request = Request(url, headers=headers) if headers else Request(url)
        req = urlopen(request, timeout=time_out)
        log(0, 'Response code: {code}', code=req.getcode())
        # Treat 4xx/5xx status codes as failures as well.
        if 400 <= req.getcode() < 600:
            raise HTTPError('HTTP %s Error for url: %s' % (req.getcode(), url), response=req)
    except (HTTPError, URLError) as err:
        log(2, 'Download failed with error {}'.format(err))
        # Internet down, try again?
        if yesno_dialog(localize(30004), '{line1}\n{line2}'.format(line1=localize(30063), line2=localize(30065))):
            return _http_request(url, headers, time_out)
        return None

    return req
Example #4
Source File: search.py From sqliv with GNU General Public License v3.0 | 6 votes |
def search(self, query, pages=10):
    """search and return an array of urls"""
    urls = []

    try:
        for url in google.search(query, start=0, stop=pages):
            urls.append(url)
    except HTTPError:
        exit("[503] Service Unreachable")
    except URLError:
        exit("[504] Gateway Timeout")
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit (raised by the
        # handlers above) and KeyboardInterrupt are not swallowed.
        exit("Unknown error occurred")
    else:
        return urls
Example #5
Source File: github.py From fusesoc with BSD 2-Clause "Simplified" License | 6 votes |
def _checkout(self, local_dir):
    """Download and unpack a github tarball for the configured repo."""
    user = self.config.get("user")
    repo = self.config.get("repo")
    version = self.config.get("version", "master")

    # TODO : Sanitize URL
    url = URL.format(user=user, repo=repo, version=version)
    logger.info("Downloading {}/{} from github".format(user, repo))
    try:
        (filename, headers) = urllib.urlretrieve(url)
    except URLError as e:
        raise RuntimeError("Failed to download '{}'. '{}'".format(url, e.reason))

    archive = tarfile.open(filename)
    (cache_root, core) = os.path.split(local_dir)

    # Ugly hack to get the first part of the directory name of the
    # extracted files
    first_entry = archive.getnames()[0]
    archive.extractall(cache_root)
    os.rename(os.path.join(cache_root, first_entry),
              os.path.join(cache_root, core))
Example #6
Source File: net.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def validate_(self, value, context=None):
    """Reject values that are not valid URLs; optionally probe reachability."""
    url = self.valid_url(value)
    if not url:
        raise StopValidationError(self.messages['invalid_url'])
    if not self.verify_exists:
        return

    # Rebuild a normalized URL string and percent-encode it before probing.
    host = url['host6'] or url['host4'] or url['hostn_enc']
    netloc = host + ':' + (url['port'] or '')
    rebuilt = urlunsplit(
        (url['scheme'], netloc, url['path'], url['query'], url['frag']))
    url_string = urlquote(rebuilt.encode('utf-8'), safe=VALID_CHAR_STRING)
    try:
        urlopen(url_string)
    except URLError:
        raise StopValidationError(self.messages['not_found'])
Example #7
Source File: net.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def validate_(self, value, context=None):
    """Reject values that are not valid URLs; optionally probe reachability."""
    url = self.valid_url(value)
    if not url:
        raise StopValidationError(self.messages['invalid_url'])
    if not self.verify_exists:
        return

    # Rebuild a normalized URL string and percent-encode it before probing.
    host = url['host6'] or url['host4'] or url['hostn_enc']
    netloc = host + ':' + (url['port'] or '')
    rebuilt = urlunsplit(
        (url['scheme'], netloc, url['path'], url['query'], url['frag']))
    url_string = urlquote(rebuilt.encode('utf-8'), safe=VALID_CHAR_STRING)
    try:
        urlopen(url_string)
    except URLError:
        raise StopValidationError(self.messages['not_found'])
Example #8
Source File: build.py From llvm-zorg with Apache License 2.0 | 6 votes |
def http_download(url, dest):
    """Safely download url to dest.

    Print error and exit if download fails.
    """
    try:
        print("GETting", url, "to", dest, "...", end=' ')
        f = urlopen(url)
        try:
            # Open our local file for writing
            with open(dest, "wb") as local_file:
                local_file.write(f.read())
        finally:
            # Close the response even if the local write fails
            # (previously the handle was leaked).
            f.close()
    except HTTPError as e:
        print()
        print("HTTP Error:", e.code, url)
        sys.exit(1)
    except URLError as e:
        print()
        print("URL Error:", e.reason, url)
        sys.exit(1)
    print("done.")
Example #9
Source File: sbmit.py From hack4career with Apache License 2.0 | 6 votes |
def do_scan(crawling):
    # Drain the global 'tocrawl' work queue forever; exit once it is empty.
    while 1:
        try:
            crawling = tocrawl.pop()
            # print crawling
        except KeyError:
            # Queue exhausted -- nothing left to scan.
            sys.exit(1)
        url = urlparse.urlparse(crawling)
        try:
            response = urllib2.urlopen(crawling)
        except urllib2.HTTPError, e:
            # HTTP-level errors (404 etc.) are not treated as findings.
            continue
        except urllib2.URLError, e:
            # NOTE(review): a URLError is taken as evidence of blind MySQL
            # injection (presumably the payload broke the response) -- verify
            # this heuristic against the caller. Log the URL and alert audibly.
            log_file = "sqli.txt"
            FILE = open(log_file, "a")
            FILE.write(crawling)
            FILE.close()
            print "\n================================================================================"
            print "\t\tBlind MySQL Injection Detected"
            print crawling
            print "\n===============================================================================\n"
            winsound.PlaySound("SystemAsterisk", winsound.SND_ALIAS)
            time.sleep(10)
            continue
Example #10
Source File: cloudfrunt.py From cloudfrunt with MIT License | 6 votes |
def find_cf_issues(domains):
    """Return the subset of domains that look misconfigured in CloudFront.

    A domain is flagged when the plain-HTTP probe returns CloudFront's
    403 'Bad request' and the HTTPS probe either fails the TLS handshake
    or returns the same 403 'Bad request'.
    """
    error_domains = []
    for domain in domains:
        try:
            response = urlopen('http://' + domain)
        except HTTPError as e:
            if e.code == 403 and 'Bad request' in e.fp.read():
                # Retry over HTTPS to confirm the misconfiguration.
                try:
                    response = urlopen('https://' + domain)
                except URLError as e:
                    # HTTPError subclasses URLError, so both land here.
                    # Use getattr: a plain URLError has no .code attribute
                    # (previously that raised an AttributeError which escaped
                    # both bare except clauses and crashed the scan).
                    if ('handshake' in str(e).lower()
                            or (getattr(e, 'code', None) == 403
                                and 'Bad request' in e.fp.read())):
                        error_domains.append(domain)
                except Exception:
                    pass
        except Exception:
            pass
    return error_domains

# add a domain to CloudFront
Example #11
Source File: netatmo-parser.py From rainmachine-developer-resources with GNU General Public License v3.0 | 6 votes |
def postRequest(self, url, params):
    # Issue a form-encoded POST and decode the JSON response body.
    # Returns the parsed JSON, or None if both attempts fail.
    params = urlencode(params)
    headers = {"Content-Type" : "application/x-www-form-urlencoded;charset=utf-8"}
    req = urllib2.Request(url=url, data=params, headers=headers)
    try:
        response = urllib2.urlopen(req)
        log.debug("%s?%s" % (response.geturl(), params))
        return json.loads(response.read())
    except urllib2.URLError, e:
        log.debug(e)
        if hasattr(ssl, '_create_unverified_context'): #for mac os only in order to ignore invalid certificates
            try:
                # Retry once with certificate verification disabled.
                context = ssl._create_unverified_context()
                response = urllib2.urlopen(req, context=context)
                return json.loads(response.read())
            except Exception, e:
                log.exception(e)
Example #12
Source File: zooqle.py From search-plugins with GNU General Public License v2.0 | 6 votes |
def retrieve_url_nodecode(url):
    """ Return the content of the url page as a string """
    request = Request(url, headers=headers)
    try:
        response = urlopen(request)
    except URLError as errno:
        # Report the failure and hand back an empty body.
        print("Connection error:", str(errno.reason))
        print("URL:", url)
        return ""
    payload = response.read()
    # A gzip stream starts with the magic bytes \037\213; decompress if found.
    if payload[:2] == '\037\213':
        payload = gzip.GzipFile(fileobj=StringIO(payload)).read()
    return payload
Example #13
Source File: zooqle.py From search-plugins with GNU General Public License v2.0 | 6 votes |
def retrieve_url_nodecode(url):
    """ Return the content of the url page as a string """
    request = Request(url, headers=headers)
    try:
        response = urlopen(request)
    except URLError as errno:
        # Report the failure and hand back an empty body.
        print("Connection error:", str(errno.reason))
        print("URL:", url)
        return ""
    payload = response.read()
    # A gzip stream starts with the magic bytes \037\213; decompress if found.
    if payload[:2] == '\037\213':
        payload = gzip.GzipFile(fileobj=StringIO(payload)).read()
    return payload
Example #14
Source File: cloudfrunt.py From cloudfrunt with MIT License | 6 votes |
def get_cf_ranges(cf_url):
    """Fetch the published CloudFront IP prefixes from cf_url.

    Retries indefinitely on URLError; any other failure is fatal.
    """
    response = None
    while response is None:
        try:
            response = urlopen(cf_url)
        except URLError:
            print(' [?] Got URLError trying to get CloudFront IP ranges. Retrying...')
        except:
            print(' [?] Got an unexpected error trying to get CloudFront IP ranges. Exiting...')
            raise
    cf_data = json.load(response)
    # Keep only the prefixes that belong to the CLOUDFRONT service.
    return [entry.get('ip_prefix') for entry in cf_data['prefixes']
            if entry.get('service') == 'CLOUDFRONT']

# find more domains and correct for CloudFront
Example #15
Source File: run.py From github-stats with MIT License | 6 votes |
def check_for_update():
    """Ping CORE_VERSION_URL at most once per UTC day and cache the reply."""
    if os.path.exists(FILE_UPDATE):
        last_checked = datetime.utcfromtimestamp(
            os.path.getmtime(FILE_UPDATE)).strftime('%Y-%m-%d')
        if last_checked == datetime.utcnow().strftime('%Y-%m-%d'):
            return  # already checked today
    try:
        # Touch the marker first so a failed request still counts for today.
        with open(FILE_UPDATE, 'a'):
            os.utime(FILE_UPDATE, None)
        req = urllib2.Request(
            CORE_VERSION_URL,
            urllib.urlencode({'version': __version__}),
        )
        resp = urllib2.urlopen(req)
        with open(FILE_UPDATE, 'w') as update_json:
            update_json.write(resp.read())
    except (urllib2.HTTPError, urllib2.URLError):
        pass
Example #16
Source File: utils.py From cloudify-manager-blueprints with Apache License 2.0 | 6 votes |
def http_request(url, data=None, method='PUT', headers=None, timeout=None,
                 should_fail=False):
    """Issue an HTTP request with an arbitrary verb via urllib2.

    Returns the response object, or None on URLError (the error is only
    logged when should_fail is False).
    """
    request = urllib2.Request(url, data=data, headers=headers or {})
    # urllib2 only knows GET/POST natively, so force the verb explicitly.
    request.get_method = lambda: method
    try:
        if timeout:
            return urllib2.urlopen(request, timeout=timeout)
        return urllib2.urlopen(request)
    except urllib2.URLError as e:
        if not should_fail:
            ctx.logger.error('Failed to {0} {1} (reason: {2})'.format(
                method, url, e.reason))
Example #17
Source File: test__datasource.py From lambda-packs with MIT License | 5 votes |
def urlopen_stub(url, data=None):
    '''Stub to replace urlopen for testing.'''
    if url != valid_httpurl():
        # Any other URL behaves like an unresolvable host.
        raise URLError('Name or service not known')
    # Hand back a fresh temporary file as a stand-in for the response.
    return NamedTemporaryFile(prefix='urltmp_')

# setup and teardown
Example #18
Source File: test_urllib2net.py From ironpython2 with Apache License 2.0 | 5 votes |
def test_ftp(self):
    # One fetchable FTP URL, plus one that must raise URLError.
    urls = [
        'ftp://www.pythontest.net/README',
        ('ftp://www.pythontest.net/non-existent-file',
         None,
         urllib2.URLError),
    ]
    self._test_urls(urls, self._extra_handlers())
Example #19
Source File: turn-power-on.py From sysadmin-tools with Apache License 2.0 | 5 votes |
def send_post_request(uri, creds, payload, headers):
    # POST payload as JSON to uri with basic auth (TLS verification off).
    # Returns {'ret': True} on success, or {'ret': False, 'msg': ...}.
    # NOTE(review): requests raises requests.exceptions.* rather than
    # urllib's HTTPError/URLError, so the first two handlers may never
    # fire and most failures fall through to the catch-all -- verify.
    try:
        requests.post(uri, data=json.dumps(payload), headers=headers,
                      verify=False, auth=(creds['user'], creds['pswd']))
    except HTTPError as e:
        return {'ret': False, 'msg': "HTTP Error: %s" % e.code}
    except URLError as e:
        return {'ret': False, 'msg': "URL Error: %s" % e.reason}
    # Almost all errors should be caught above, but just in case
    except:
        return {'ret': False, 'msg': "Error"}
    return {'ret': True}
Example #20
Source File: test_urllib2net.py From ironpython2 with Apache License 2.0 | 5 votes |
def _test_urls(self, urls, handlers, retry=True):
    # Open every URL through an opener built from 'handlers', asserting
    # expected errors and tolerating transient network timeouts.
    import time
    import logging
    debug = logging.getLogger("test_urllib2").debug

    urlopen = urllib2.build_opener(*handlers).open
    if retry:
        # Wrap so transient URLErrors are retried up to three times.
        urlopen = _wrap_with_retry_thrice(urlopen, urllib2.URLError)

    for url in urls:
        # A tuple entry carries (url, request, expected exception class).
        if isinstance(url, tuple):
            url, req, expected_err = url
        else:
            req = expected_err = None

        with test_support.transient_internet(url):
            debug(url)
            try:
                f = urlopen(url, req, TIMEOUT)
            except EnvironmentError as err:
                debug(err)
                if expected_err:
                    msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
                           (expected_err, url, req, type(err), err))
                    self.assertIsInstance(err, expected_err, msg)
            except urllib2.URLError as err:
                # Timeouts are reported but do not fail the test.
                if isinstance(err[0], socket.timeout):
                    print >>sys.stderr, "<timeout: %s>" % url
                    continue
                else:
                    raise
            else:
                try:
                    with test_support.transient_internet(url):
                        buf = f.read()
                        debug("read %d bytes" % len(buf))
                except socket.timeout:
                    print >>sys.stderr, "<timeout: %s>" % url
                f.close()
        debug("******** next url coming up...")
        # Small pause between URLs to avoid hammering the test servers.
        time.sleep(0.1)
Example #21
Source File: test_urllib2_localnet.py From ironpython2 with Apache License 2.0 | 5 votes |
def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
    """auth-int qop must either succeed or fail with exactly URLError."""
    self.proxy_digest_handler.add_password(self.REALM, self.URL,
                                           self.USER, self.PASSWD)
    self.digest_auth_handler.set_qop("auth-int")
    result = None
    try:
        result = self.opener.open(self.URL)
    except urllib2.URLError:
        # It's okay if we don't support auth-int, but we certainly
        # shouldn't receive any kind of exception here other than
        # a URLError.
        result = None
    if result:
        # Drain and close the response when the open succeeded.
        while result.read():
            pass
        result.close()
Example #22
Source File: turn-power-on.py From sysadmin-tools with Apache License 2.0 | 5 votes |
def send_get_request(uri, creds):
    # GET uri with basic auth (TLS verification off) and parse the JSON body.
    # Returns {'ret': True, 'data': ...} or {'ret': False, 'msg': ...}.
    # NOTE(review): requests raises requests.exceptions.* rather than
    # urllib's HTTPError/URLError, so the first two handlers may never
    # fire and most failures fall through to the catch-all -- verify.
    try:
        resp = requests.get(uri, verify=False,
                            auth=(creds['user'], creds['pswd']))
        data = resp.json()
    except HTTPError as e:
        return {'ret': False, 'msg': "HTTP Error: %s" % e.code}
    except URLError as e:
        return {'ret': False, 'msg': "URL Error: %s" % e.reason}
    # Almost all errors should be caught above, but just in case
    except:
        return {'ret': False, 'msg': "Error"}
    return {'ret': True, 'data': data}
Example #23
Source File: turn-power-off.py From sysadmin-tools with Apache License 2.0 | 5 votes |
def send_post_request(uri, creds, payload, headers):
    # POST payload as JSON to uri with basic auth (TLS verification off).
    # Returns {'ret': True} on success, or {'ret': False, 'msg': ...}.
    # NOTE(review): requests raises requests.exceptions.* rather than
    # urllib's HTTPError/URLError, so the first two handlers may never
    # fire and most failures fall through to the catch-all -- verify.
    try:
        requests.post(uri, data=json.dumps(payload), headers=headers,
                      verify=False, auth=(creds['user'], creds['pswd']))
    except HTTPError as e:
        return {'ret': False, 'msg': "HTTP Error: %s" % e.code}
    except URLError as e:
        return {'ret': False, 'msg': "URL Error: %s" % e.reason}
    # Almost all errors should be caught above, but just in case
    except:
        return {'ret': False, 'msg': "Error"}
    return {'ret': True}
Example #24
Source File: turn-power-off.py From sysadmin-tools with Apache License 2.0 | 5 votes |
def send_get_request(uri, creds):
    # GET uri with basic auth (TLS verification off) and parse the JSON body.
    # Returns {'ret': True, 'data': ...} or {'ret': False, 'msg': ...}.
    # NOTE(review): requests raises requests.exceptions.* rather than
    # urllib's HTTPError/URLError, so the first two handlers may never
    # fire and most failures fall through to the catch-all -- verify.
    try:
        resp = requests.get(uri, verify=False,
                            auth=(creds['user'], creds['pswd']))
        data = resp.json()
    except HTTPError as e:
        return {'ret': False, 'msg': "HTTP Error: %s" % e.code}
    except URLError as e:
        return {'ret': False, 'msg': "URL Error: %s" % e.reason}
    # Almost all errors should be caught above, but just in case
    except:
        return {'ret': False, 'msg': "Error"}
    return {'ret': True, 'data': data}
Example #25
Source File: get-power-state.py From sysadmin-tools with Apache License 2.0 | 5 votes |
def send_get_request(uri, creds):
    # GET uri with basic auth (TLS verification off) and parse the JSON body.
    # Returns {'ret': True, 'data': ...} or {'ret': False, 'msg': ...}.
    # NOTE(review): requests raises requests.exceptions.* rather than
    # urllib's HTTPError/URLError, so the first two handlers may never
    # fire and most failures fall through to the catch-all -- verify.
    try:
        resp = requests.get(uri, verify=False,
                            auth=(creds['user'], creds['pswd']))
        data = resp.json()
    except HTTPError as e:
        return {'ret': False, 'msg': "HTTP Error: %s" % e.code}
    except URLError as e:
        return {'ret': False, 'msg': "URL Error: %s" % e.reason}
    # Almost all errors should be caught above, but just in case
    except:
        return {'ret': False, 'msg': "Error"}
    return {'ret': True, 'data': data}
Example #26
Source File: app.py From slack-pokerbot with Apache License 2.0 | 5 votes |
def send_delayed_message(url, message):
    """Send a delayed in_channel message.

    You can send up to 5 messages per user command.
    """
    req = urllib2.Request(url)
    req.add_header('Content-Type', 'application/json')
    body = json.dumps(message.get_message())
    try:
        urllib2.urlopen(req, body)
    except urllib2.URLError:
        logger.error("Could not send delayed message to %s", url)
Example #27
Source File: connection.py From python-mysql-pool with MIT License | 5 votes |
def request(self, host, handler, request_body, verbose=0):
    """Send XMLRPC request"""
    uri = '{scheme}://{host}{handler}'.format(scheme=self._scheme,
                                              host=host, handler=handler)

    if self._passmgr:
        # Register credentials for this URI with the password manager.
        self._passmgr.add_password(None, uri, self._username, self._password)
    if self.verbose:
        _LOGGER.debug("FabricTransport: {0}".format(uri))

    opener = urllib2.build_opener(*self._handlers)

    headers = {
        'Content-Type': 'text/xml',
        'User-Agent': self.user_agent,
    }
    req = urllib2.Request(uri, request_body, headers=headers)

    try:
        return self.parse_response(opener.open(req))
    except (urllib2.URLError, urllib2.HTTPError) as exc:
        # Map transport errors onto a single InterfaceError with a
        # human-readable reason.
        try:
            code = -1
            if exc.code == 400:
                reason = 'Permission denied'
                code = exc.code
            else:
                reason = exc.reason
            msg = "{reason} ({code})".format(reason=reason, code=code)
        except AttributeError:
            # Plain URLError has no .code; fall back to its string form.
            if 'SSL' in str(exc):
                msg = "SSL error"
            else:
                msg = str(exc)
        raise InterfaceError("Connection with Fabric failed: " + msg)
    except BadStatusLine:
        raise InterfaceError("Connection with Fabric failed: check SSL")
Example #28
Source File: run.py From github-stats with MIT License | 5 votes |
def internet_on():
    """Return True when INTERNET_TEST_URL answers within two seconds."""
    try:
        urllib2.urlopen(INTERNET_TEST_URL, timeout=2)
    except (urllib2.URLError, socket.timeout):
        return False
    return True
Example #29
Source File: push.py From RF-Monitor with GNU General Public License v2.0 | 5 votes |
def __send(self, uri, data):
    """POST data as JSON; on failure, queue it for retry and post an event."""
    request = urllib2.Request(uri)
    request.add_header('Content-Type', 'application/json')
    failure = None
    try:
        urllib2.urlopen(request, data)
    except ValueError as error:
        failure = Event(Events.PUSH_ERROR, msg=error.message)
    except URLError as error:
        failure = Event(Events.PUSH_ERROR, msg=error.reason.strerror)
    if failure is not None:
        # Keep the payload so it can be retransmitted later.
        self._failed.append(data)
        post_event(self._handler, failure)
Example #30
Source File: getobj.py From spider with Apache License 2.0 | 5 votes |
def gethtml(self): request = urllib2.Request(self.url,headers=self.send_headers) try: soures_home = self.opener.open(request).read() except urllib2.URLError,e: return None