Python urllib.request.URLError() Examples
The following are 30 code examples of urllib.request.URLError().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
urllib.request
, or try the search function.
Example #1
Source File: connection.py From cangibrina with GNU General Public License v2.0 | 6 votes |
def HTTPcode(self):
    """Return the HTTP status code of self.target, or 404 on failure.

    When self.agent is truthy, a mechanize Browser is used so a real
    browser User-Agent can be presented; otherwise a plain urlopen()
    is issued.  HTTPError/URLError are collapsed to 404 (best-effort
    probe; callers only distinguish reachable vs. not).
    """
    try:
        if self.agent:  # was `== True`; truthiness is the idiomatic test
            br = Browser()
            user_agent = ("Mozilla/5.0 (Windows NT 5.1; rv:31.0) "
                          "Gecko/20100101 Firefox/31.0")
            br.set_handle_robots(False)
            # Bug fix: the original built this full UA string but then
            # sent the misspelled literal "Fifefox" and left the string
            # unused; send the real User-Agent instead.
            br.addheaders = [("User-agent", user_agent)]
            resp = br.open(self.target).code
        else:
            resp = u.urlopen(self.target).getcode()
        return resp
    except (u.HTTPError, u.URLError):
        return 404
Example #2
Source File: nsidc_icesat2_associated.py From read-ICESat-2 with MIT License | 6 votes |
def http_pull_file(remote_file, remote_mtime, local_file, MODE):
    """Fetch remote_file over HTTP into local_file.

    The local copy keeps the remote modification time (remote_mtime)
    and is chmod-ed to MODE.  urllib2 may raise HTTPError/URLError.
    """
    # Log the transfer being performed.
    print('{0} -->\n\t{1}\n'.format(remote_file, local_file))
    # Create and submit the request; a wide range of exceptions can be
    # thrown here, including HTTPError and URLError.
    response = urllib2.urlopen(urllib2.Request(remote_file))
    # Stream the response to disk in 16 KiB chunks so large files do
    # not have to fit in memory (works for ascii and binary payloads).
    chunk_size = 16 * 1024
    with open(local_file, 'wb') as dest:
        shutil.copyfileobj(response, dest, chunk_size)
    # Preserve the server-side modification time; keep the local atime.
    os.utime(local_file, (os.stat(local_file).st_atime, remote_mtime))
    os.chmod(local_file, MODE)

#-- PURPOSE: help module to describe the optional input parameters
Example #3
Source File: extract_test_stats.py From citest with Apache License 2.0 | 6 votes |
def write_to_influx_db(url, db_name, summaries):
    """Write the extracted summaries to influxdb.

    Args:
      url: base URL of the influxdb server.
      db_name: database name appended as the ?db= query parameter.
      summaries: iterable of suite dicts with 'suite', optional
          'config', and 'tests' (each with 'name' and 'attempts').

    Raises:
      URLError: re-raised after logging when the POST fails.
    """
    metrics = []
    target = url + '/write?db=%s' % db_name
    for suite in summaries:
        common_tags = {'suite': suite['suite']}
        common_tags.update(suite.get('config', {}))
        for test in suite['tests']:
            common_tags['test'] = test['name']
            for attempt in test['attempts']:
                metrics.extend(encode_attempt_metrics(attempt, common_tags))
    # Bug fix: Request data must be bytes on Python 3 (str raises TypeError).
    payload = '\n'.join(metrics).encode('utf-8')
    req = Request(url=target, data=payload)
    req.get_method = lambda: 'POST'
    try:
        urlopen(req)
        print('WROTE %d metrics to %s' % (len(metrics), target))
    except URLError as err:
        # Bug fix: only HTTPError carries a response body to read();
        # a bare URLError (e.g. connection refused) has no .read() and
        # the old code raised AttributeError inside this handler.
        body = err.read() if hasattr(err, 'read') else ''
        print('ERROR: %s\n%s' % (err.reason, body))
        raise
Example #4
Source File: UsageReport.py From Controllers with MIT License | 6 votes |
def submit_report(self, report_data):
    """POST the JSON-encoded usage report to the configured server.

    Outcomes (success, HTTP error code, or connection failure) are
    reported through the Logger CBT rather than raised.
    """
    data = json.dumps(report_data).encode('utf8')
    self.register_cbt("Logger", "LOG_DEBUG",
                      "Usage report data: {0}".format(data))
    url = None
    try:
        url = "http://{0}:{1}/api/submit".format(
            self._cm_config["ServerAddress"],
            self._cm_config["ServerPort"])
        request = urllib2.Request(url=url, data=data)
        request.add_header("Content-Type", "application/json")
        response = urllib2.urlopen(request)
        status = response.getcode()
        if status == 200:
            message = ("Usage report successfully submitted to server {0}\n"
                       "HTTP response code:{1}, msg:{2}"
                       .format(url, status, response.read()))
            self.register_cbt("Logger", "LOG_INFO", message)
        else:
            self.register_cbt(
                "Logger", "LOG_WARNING",
                "Usage report server indicated error code: {0}".format(status))
    except (urllib2.HTTPError, urllib2.URLError) as error:
        self.register_cbt(
            "Logger", "LOG_WARNING",
            "Usage report submission failed to server {0}. "
            "Error: {1}".format(url, error))
Example #5
Source File: tophub.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def check_backend(backend):
    """Download pre-tuned parameters for *backend* if not already cached.

    Parameters
    ----------
    backend: str
        The name of backend.
    """
    backend = _alias(backend)
    assert backend in PACKAGE_VERSION, 'Cannot find backend "%s" in TopHub' % backend

    package_name = "%s_%s.log" % (backend, PACKAGE_VERSION[backend])
    # Nothing to do when the package is already on disk.
    if os.path.isfile(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)):
        return

    # urllib2 only exists on Python 2; alias the Python 3 module to it.
    if sys.version_info >= (3,):
        import urllib.request as urllib2
    else:
        import urllib2
    try:
        download_package(package_name)
    except urllib2.URLError as e:
        logging.warning("Failed to download tophub package for %s: %s",
                        backend, e)
Example #6
Source File: zooqle.py From search-plugins with GNU General Public License v2.0 | 6 votes |
def retrieve_url_nodecode(url):
    """ Return the content of the url page as a string """
    req = Request(url, headers=headers)
    try:
        response = urlopen(req)
    except URLError as errno:
        print(" ".join(("Connection error:", str(errno.reason))))
        print(" ".join(("URL:", url)))
        return ""
    dat = response.read()
    # Check if it is gzipped.
    # Bug fix: urlopen() returns bytes on Python 3, so the gzip magic
    # number must be a bytes literal — the old str comparison was
    # always False and gzipped bodies were returned still compressed
    # (and StringIO(bytes) would have failed anyway).
    if dat[:2] == b'\037\213':
        # Data is gzip encoded, decode it in one stdlib call.
        dat = gzip.decompress(dat)
    return dat
Example #7
Source File: zooqle.py From search-plugins with GNU General Public License v2.0 | 6 votes |
def retrieve_url_nodecode(url):
    """ Return the content of the url page as a string """
    req = Request(url, headers=headers)
    try:
        response = urlopen(req)
    except URLError as errno:
        print(" ".join(("Connection error:", str(errno.reason))))
        print(" ".join(("URL:", url)))
        return ""
    dat = response.read()
    # Check if it is gzipped.
    # Bug fix: urlopen() returns bytes on Python 3, so the gzip magic
    # number must be a bytes literal — the old str comparison was
    # always False and gzipped bodies were returned still compressed
    # (and StringIO(bytes) would have failed anyway).
    if dat[:2] == b'\037\213':
        # Data is gzip encoded, decode it in one stdlib call.
        dat = gzip.decompress(dat)
    return dat
Example #8
Source File: downloader.py From pdfx with Apache License 2.0 | 6 votes |
def get_status_code(url):
    """ Perform HEAD request and return status code """
    try:
        head_req = Request(sanitize_url(url))
        head_req.add_header("User-Agent",
                            "Mozilla/5.0 (compatible; MSIE 9.0; "
                            "Windows NT 6.1; Trident/5.0)")
        # Turn the GET into a HEAD so only headers are fetched.
        head_req.get_method = lambda: 'HEAD'
        return urlopen(head_req, context=ssl_unverified_context).getcode()
    except HTTPError as e:
        # Server answered with an error status: report that status.
        return e.code
    except URLError as e:
        # No HTTP conversation happened; return the failure reason.
        return e.reason
    except Exception as e:
        print(e, url)
        return None
Example #9
Source File: magicmirrorplatform.py From AlexaPi with MIT License | 6 votes |
def mm_heartbeat(self):
    """Ping the Magic Mirror endpoint, then schedule the next heartbeat."""
    # Stop re-arming the timer once shutdown has been requested.
    if self.shutdown:
        return
    threading.Timer(self.hb_timer, self.mm_heartbeat).start()

    address = ("http://" + self.mm_host + ":" + self.mm_port
               + "/alexapi?action=AVSHB")
    logger.debug("Sending MM Heatbeat")
    try:
        response = urlopen(address).read()
    except URLError as err:
        logger.error("URLError: %s", err.reason)
        return
    logger.debug("Response: %s", response)
Example #10
Source File: tests.py From python-osrm with MIT License | 6 votes |
def test_non_existing_host(self):
    """Every endpoint wrapper must raise URLError for an unreachable host."""
    profile = osrm.RequestConfig("localhost/v1/flying")
    self.assertEqual(profile.host, "localhost")
    # Each thunk issues one API call against the dead host.
    calls = [
        lambda: osrm.nearest((12.36, 45.36), url_config=profile),
        lambda: osrm.trip(
            [(13.38886, 52.51703), (10.00, 53.55), (52.374444, 9.738611)],
            url_config=profile),
        lambda: osrm.simple_route(
            (13.38886, 52.51703), (10.00, 53.55), url_config=profile),
        lambda: osrm.AccessIsochrone(
            (13.38886, 52.51703), points_grid=100, url_config=profile),
        lambda: osrm.match(
            [(10.00, 53.55), (52.374444, 9.738611)], url_config=profile),
        lambda: osrm.table(
            [(10.00, 53.55), (52.374444, 9.738611)],
            [(10.00, 53.55), (52.374444, 9.738611)],
            url_config=profile),
    ]
    for call in calls:
        with self.assertRaises(URLError):
            call()
Example #11
Source File: recipe-578957.py From code with MIT License | 5 votes |
def s3_open(self, req):
    """Open an s3://<bucket>/<key> URL and return an addinfourl response.

    Modeled on urllib.request.FileHandler.file_open().  The boto S3
    connection is created lazily on first use and cached on self._conn.

    Raises URLError when the URL is malformed or the key does not exist.
    """
    bucket_name = req.host
    key_name = url2pathname(req.selector)[1:]
    if not bucket_name or not key_name:
        raise URLError('url must be in the format s3://<bucket>/<key>')

    # Lazily create and memoize the S3 connection.
    if not hasattr(self, '_conn'):
        self._conn = boto.s3.connection.S3Connection()
    conn = self._conn

    bucket = conn.get_bucket(bucket_name, validate=False)
    key = bucket.get_key(key_name)
    origurl = 's3://{}/{}'.format(bucket_name, key_name)
    if key is None:
        raise URLError('no such resource: {}'.format(origurl))

    # Translate the key's metadata into RFC 822 style response headers,
    # skipping attributes S3 did not provide.
    raw_headers = [
        ('Content-type', key.content_type),
        ('Content-encoding', key.content_encoding),
        ('Content-language', key.content_language),
        ('Content-length', key.size),
        ('Etag', key.etag),
        ('Last-modified', key.last_modified),
    ]
    header_text = '\n'.join(
        '{}: {}'.format(name, value)
        for name, value in raw_headers if value is not None)
    return addinfourl(_FileLikeKey(key),
                      email.message_from_string(header_text),
                      origurl)
Example #12
Source File: http_test.py From exactonline with GNU Lesser General Public License v3.0 | 5 votes |
def test_https_with_disallowed_real_secure(self):
    """HTTPS must fail when the custom CA bundle lacks the real cert."""
    my_opt = Options()
    my_opt.cacert_file = path.join(
        path.dirname(__file__), 'http_testserver.crt')
    # Merge the secure defaults with the bogus CA file and expect failure.
    self.assertRaises(
        request.URLError, http_get,
        'https://api.github.com/', opt=opt_secure | my_opt)

# ; Python23 compatibility helpers
Example #13
Source File: internet.py From MouseTracks with GNU General Public License v3.0 | 5 votes |
def send_request(url, timeout=None, output=False):
    """Open *url* and return the response object, or None on failure.

    When *output* is truthy, a localized notification is emitted first.
    """
    if output:
        NOTIFY(LANGUAGE.strings['Internet']['Request'], URL=url)
    try:
        response = urllib2.urlopen(url, timeout=timeout)
    except (urllib2.URLError, urllib2.HTTPError):
        return None
    return response
Example #14
Source File: github_release.py From r-bridge-install with Apache License 2.0 | 5 votes |
def save_url(url, output_path):
    """Save a URL to disk."""
    valid_types = ['application/zip', 'application/octet-stream']
    r = None
    # Up to five attempts, sleeping 3 s between failures; a successful
    # open breaks out immediately.
    for _ in range(5):
        try:
            r = request.urlopen(url)
            break
        except request.HTTPError as e:
            reason = e.reason if e.reason else "None given"
            arcpy.AddError("Unable to access '{}', (reason: {}).".format(
                url, reason))
        except request.URLError as e:
            # retry all URLErrors
            arcpy.AddWarning("Access failed, trying again.")
        time.sleep(3)
    # Only accept a 200 response whose content type looks like a zip.
    if r and r.headers['content-type'] in valid_types and r.code == 200:
        arcpy.AddMessage("Saving URL to '{}'".format(output_path))
        with open(output_path, 'wb') as f:
            f.write(r.read())
    else:
        arcpy.AddError("Unable to access '{}', invalid content.".format(url))
        if r:
            arcpy.AddError("Content type: {}, response code: {}".format(
                r.headers['content-type'], r.code))
        msg = ("Either a connectivity issue or restrictions on downloading "
               "prevented the tool from downloading. Please download the "
               "zip manually from {} and move it to "
               "the same location as this toolbox.".format(latest_url))
        arcpy.AddError(msg)
Example #15
Source File: github_release.py From r-bridge-install with Apache License 2.0 | 5 votes |
def parse_json_url(url):
    """Parse and return a JSON response from a URL.

    Retries up to five times with a 3 s pause between attempts.
    Returns the decoded object, or None (after arcpy.AddWarning with
    the last error message) when every attempt fails.
    """
    res = None
    r = None
    err_msg = None
    for _ in range(5):
        try:
            r = request.urlopen(url)
            if r.code == 200:
                # urllib doesn't know bytestreams
                str_response = r.read().decode('utf-8')
                res = json.loads(str_response)
                break
            else:
                err_msg = "Unable to access'{}', invalid response.".format(url)
        except request.URLError as e:
            err_msg = "Unable to access'{}', error: {}.".format(url, e.reason)
        except LookupError as e:
            # Bug fix: LookupError has no `.reason` attribute, so the old
            # handler itself raised AttributeError; format the exception.
            err_msg = "Unable to access'{}', lookup error: {}.".format(url, e)
        time.sleep(3)
    if err_msg:
        arcpy.AddWarning(err_msg)
    return res
Example #16
Source File: bandwidth_test.py From dataserv-client with MIT License | 5 votes |
def catch_request(request):
    """Open *request* and return (handle, False) on success.

    On any of the usual connection failures (HTTPError, URLError,
    socket.error) return (None, exception) instead of raising.
    """
    try:
        return urlopen(request), False
    except (HTTPError, URLError, socket.error):
        # sys.exc_info()[1] keeps Python 2/3 compatibility for the
        # exception instance.
        return None, sys.exc_info()[1]
Example #17
Source File: http_test.py From exactonline with GNU Lesser General Public License v3.0 | 5 votes |
def test_https_with_self_signed(self):
    """A self-signed server certificate must be rejected by secure opts."""
    server = HttpTestServer('GET', '200', 'ssl', use_ssl=True)
    url = 'https://localhost:%d/path' % (server.port,)
    self.assertRaises(request.URLError, http_get, url, opt=opt_secure)
    server.join()
Example #18
Source File: bandwidth_test.py From dataserv-client with MIT License | 5 votes |
def getBestServer(servers):
    """Measure latency to each speedtest.net server; return the fastest.

    Each server's /latency.txt is probed three times; a failed or wrong
    probe counts as 3600 s.  The chosen server dict gains a 'latency'
    key holding the averaged value in milliseconds.
    """
    results = {}
    for server in servers:
        latencies = []
        url = '%s/latency.txt' % os.path.dirname(server['url'])
        urlparts = urlparse(url)
        for _ in range(3):
            try:
                conn_cls = (HTTPSConnection if urlparts[0] == 'https'
                            else HTTPConnection)
                conn = conn_cls(urlparts[1])
                start = timeit.default_timer()
                conn.request("GET", urlparts[2],
                             headers={'User-Agent': user_agent})
                resp = conn.getresponse()
                elapsed = timeit.default_timer() - start
            except (HTTPError, URLError, socket.error):
                latencies.append(3600)
                continue
            body = resp.read(9)
            if int(resp.status) == 200 and body == b'test=test':
                latencies.append(elapsed)
            else:
                latencies.append(3600)
            conn.close()
        # NOTE(review): dividing 3 samples by 6 halves the averaged
        # round-trip time — this mirrors the upstream speedtest heuristic.
        avg = round((sum(latencies) / 6) * 1000, 3)
        results[avg] = server
    fastest = min(results)
    best = results[fastest]
    best['latency'] = fastest
    return best
Example #19
Source File: google_images_download.py From BotHub with Apache License 2.0 | 5 votes |
def download_page(self, url):
    """Fetch *url* and return the page body.

    Uses urllib.request on Python 3+ and urllib2 on Python 2, falling
    back to an unverified SSL context when certificate checks fail.
    On any other error a message is printed and the process exits.
    """
    if sys.version_info >= (3, 0):
        # Python 3 path.
        try:
            ua = ("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36")
            req = urllib.request.Request(url, headers={'User-Agent': ua})
            resp = urllib.request.urlopen(req)
            return str(resp.read())
        except Exception:
            print("Could not open URL. Please check your internet connection and/or ssl settings \n"
                  "If you are using proxy, make sure your proxy settings is configured correctly")
            sys.exit()
    else:
        # Python 2 path.
        try:
            ua = ("Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 "
                  "(KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17")
            req = urllib2.Request(url, headers={'User-Agent': ua})
            try:
                response = urllib2.urlopen(req)
            except URLError:
                # Handling SSL certificate failed: retry unverified.
                context = ssl._create_unverified_context()
                response = urlopen(req, context=context)
            return response.read()
        except:
            print("Could not open URL. Please check your internet connection and/or ssl settings \n"
                  "If you are using proxy, make sure your proxy settings is configured correctly")
            sys.exit()
        return "Page Not found"

# Download Page for more than 100 images
Example #20
Source File: playlist.py From ffplayout-engine with GNU General Public License v3.0 | 5 votes |
def get_playlist(self):
    # Resolve the playlist path: an explicit --playlist argument wins,
    # otherwise build <playlist.path>/<year>/<month>/<date>.json from
    # the configured playlist root and self.list_date (YYYY-MM-DD).
    if stdin_args.playlist:
        self.json_file = stdin_args.playlist
    else:
        year, month, day = self.list_date.split('-')
        self.json_file = os.path.join(
            _playlist.path, year, month, self.list_date + '.json')
    if '://' in self.json_file:
        # Remote playlist (URL).  Normalize path separators, then only
        # re-parse the playlist when the server reports a newer
        # Last-Modified than the copy we already loaded.
        self.json_file = self.json_file.replace('\\', '/')
        try:
            # NOTE(review): certificate verification is deliberately
            # disabled here (unverified context) — confirm this is safe
            # for the deployment environment.
            req = request.urlopen(self.json_file,
                                  timeout=1,
                                  context=ssl._create_unverified_context())
            b_time = req.headers['last-modified']
            temp_time = time.strptime(b_time, "%a, %d %b %Y %H:%M:%S %Z")
            mod_time = time.mktime(temp_time)
            if mod_time > self.last_mod_time:
                self.clip_nodes = valid_json(req)
                self.last_mod_time = mod_time
                messenger.info('Open: ' + self.json_file)
                validate_thread(self.clip_nodes)
        except (request.URLError, socket.timeout):
            self.eof_handling('Get playlist from url failed!', False)
    elif os.path.isfile(self.json_file):
        # Local playlist file: check last modification from playlist
        # and only re-read it when it changed on disk.
        mod_time = os.path.getmtime(self.json_file)
        if mod_time > self.last_mod_time:
            with open(self.json_file, 'r', encoding='utf-8') as f:
                self.clip_nodes = valid_json(f)
            self.last_mod_time = mod_time
            messenger.info('Open: ' + self.json_file)
            validate_thread(self.clip_nodes)
    else:
        # Neither a URL nor an existing file: clear any loaded nodes.
        self.clip_nodes = None
Example #21
Source File: networking.py From letsrobot with Apache License 2.0 | 5 votes |
def isInternetConnected():
    """Return True when https://www.google.com answers within 1 second."""
    try:
        urllib2.urlopen('https://www.google.com', timeout=1)
    except urllib2.URLError:
        return False
    return True
Example #22
Source File: googol_images.py From X-tra-Telegram with Apache License 2.0 | 5 votes |
def download_page(self, url):
    """Fetch *url* and return the page body.

    Uses urllib.request on Python 3+ and urllib2 on Python 2, falling
    back to an unverified SSL context when certificate checks fail.
    On any other error a message is printed and the process exits.
    """
    if sys.version_info >= (3, 0):
        # Python 3 path.
        try:
            ua = ("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36")
            req = urllib.request.Request(url, headers={'User-Agent': ua})
            resp = urllib.request.urlopen(req)
            return str(resp.read())
        except Exception:
            print("Could not open URL. Please check your internet connection and/or ssl settings \n"
                  "If you are using proxy, make sure your proxy settings is configured correctly")
            sys.exit()
    else:
        # Python 2 path.
        try:
            ua = ("Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 "
                  "(KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17")
            req = urllib2.Request(url, headers={'User-Agent': ua})
            try:
                response = urllib2.urlopen(req)
            except URLError:
                # Handling SSL certificate failed: retry unverified.
                context = ssl._create_unverified_context()
                response = urlopen(req, context=context)
            return response.read()
        except:
            print("Could not open URL. Please check your internet connection and/or ssl settings \n"
                  "If you are using proxy, make sure your proxy settings is configured correctly")
            sys.exit()
        return "Page Not found"

# Download Page for more than 100 images
Example #23
Source File: padawan.py From padawan.vim with MIT License | 5 votes |
def DoRequest(self, command, params, data=''):
    """Proxy *command* to the padawan server, reporting failures via editor.

    Returns the server response, or False when the request failed.
    """
    try:
        return server.sendRequest(command, params, data)
    except URLError:
        editor.error("Padawan.php is not running")
    except Exception as e:
        # Bug fix: a generic Exception has no .errno/.strerror, so the
        # old "{0}".format(e.errno, e.strerror) raised AttributeError
        # inside this handler; format the exception itself instead.
        editor.error("Error occured {0}".format(e))
    return False
Example #24
Source File: default_api.py From mmtf-python with Apache License 2.0 | 5 votes |
def _internet_on(address): """ Check to see if the internet is on by pinging a set address. :param address: the IP or address to hit :return: a boolean - true if can be reached, false if not. """ try: urllib2.urlopen(address, timeout=1) return True except urllib2.URLError as err: return False
Example #25
Source File: google_images_download.py From Skribbl.io-Bot with MIT License | 5 votes |
def download_page(self, url):
    """Fetch *url* and return the page body.

    Uses urllib.request on Python 3+ and urllib2 on Python 2, falling
    back to an unverified SSL context when certificate checks fail.
    Prints a message (and on Python 2 returns "Page Not found") when
    the URL cannot be opened.
    """
    if sys.version_info >= (3, 0):
        # Python 3 path.
        try:
            ua = ("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36")
            req = urllib.request.Request(url, headers={'User-Agent': ua})
            resp = urllib.request.urlopen(req)
            return str(resp.read())
        except Exception:
            print("Could not open URL. Please check your internet connection and/or ssl settings")
    else:
        # Python 2 path.
        try:
            ua = ("Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 "
                  "(KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17")
            req = urllib2.Request(url, headers={'User-Agent': ua})
            try:
                response = urllib2.urlopen(req)
            except URLError:
                # Handling SSL certificate failed: retry unverified.
                context = ssl._create_unverified_context()
                response = urlopen(req, context=context)
            return response.read()
        except:
            print("Could not open URL. Please check your internet connection and/or ssl settings")
            return "Page Not found"

# Download Page for more than 100 images
Example #26
Source File: filelister.py From crypto-detector with Apache License 2.0 | 5 votes |
def download_file(url, download_directory):
    """Download a remote file

    Args:
        download_directory: (string)

    Returns:
        (string) that path of the file that was just downloaded. If
        something failed during download, return None

    Raises:
        DownloadError
    """
    Output.print_information("Downloading " + url + " ...")
    parsed_url = urlparse(url)
    # A URL with no path component has no file name; fall back to the
    # host name so the download still has somewhere to land.
    if parsed_url.path in ["/", ""]:
        file_name = parsed_url.netloc
    else:
        file_name = parsed_url.path.split("/")[-1]
    download_path = abspath(join(download_directory, file_name))
    try:
        with open(download_path, 'wb') as file_object:
            file_object.write(urlopen(url).read())
        return download_path
    except HTTPError as exc:
        raise DownloadError("HTTP error code " + str(exc.code)
                            + " while retrieving " + url + "\n"
                            + str(exc.reason))
    except URLError as exc:
        raise DownloadError("HTTP URL error while retrieving " + url
                            + "\n" + str(exc.reason))
    except Exception as exc:
        raise DownloadError("Unable to retrieve " + url + "\n" + str(exc))
Example #27
Source File: setup.py From python-chromedriver-binary with MIT License | 5 votes |
def run(self):
    """
    Downloads, unzips and installs chromedriver.
    If a chromedriver binary is found in PATH it will be copied, otherwise downloaded.
    """
    chromedriver_version='84.0.4147.30'
    chromedriver_dir = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'chromedriver_binary')
    # Prefer a matching chromedriver already present on PATH.
    chromedriver_filename = find_binary_in_path(get_chromedriver_filename())
    if chromedriver_filename and check_version(chromedriver_filename,
                                               chromedriver_version):
        print("\nChromedriver already installed at {}...\n".format(
            chromedriver_filename))
        new_filename = os.path.join(chromedriver_dir,
                                    get_chromedriver_filename())
        self.copy_file(chromedriver_filename, new_filename)
    else:
        # No usable binary on PATH: download into the package directory
        # unless a matching copy is already there.
        chromedriver_bin = get_chromedriver_filename()
        chromedriver_filename = os.path.join(chromedriver_dir,
                                             chromedriver_bin)
        if not os.path.isfile(chromedriver_filename) or \
                not check_version(chromedriver_filename, chromedriver_version):
            print("\nDownloading Chromedriver...\n")
            if not os.path.isdir(chromedriver_dir):
                os.mkdir(chromedriver_dir)
            url = get_chromedriver_url(version=chromedriver_version)
            try:
                response = urlopen(url)
                # Treat any non-200 answer the same as an unreachable URL.
                if response.getcode() != 200:
                    raise URLError('Not Found')
            except URLError:
                raise RuntimeError(
                    'Failed to download chromedriver archive: {}'.format(url))
            # Unzip the downloaded archive entirely in memory.
            archive = BytesIO(response.read())
            with zipfile.ZipFile(archive) as zip_file:
                zip_file.extract(chromedriver_bin, chromedriver_dir)
        else:
            print("\nChromedriver already installed at {}...\n".format(
                chromedriver_filename))
        # Ensure the binary is executable before handing off to build_py.
        if not os.access(chromedriver_filename, os.X_OK):
            os.chmod(chromedriver_filename, 0o744)
    build_py.run(self)
Example #28
Source File: tophub.py From incubator-tvm with Apache License 2.0 | 5 votes |
def check_backend(tophub_location, backend):
    """Ensure pre-tuned parameters for *backend* exist, downloading if needed.

    Parameters
    ----------
    backend: str
        The name of backend.

    Returns
    ----------
    success: bool
        Whether the check is successful.
    """
    backend = _alias(backend)
    assert backend in PACKAGE_VERSION, 'Cannot find backend "%s" in TopHub' % backend

    package_name = "%s_%s.log" % (backend, PACKAGE_VERSION[backend])
    # Already cached on disk: nothing to download.
    if os.path.isfile(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)):
        return True

    # pylint: disable=import-outside-toplevel
    if sys.version_info >= (3,):
        import urllib.request as urllib2
    else:
        import urllib2
    try:
        download_package(tophub_location, package_name)
        return True
    except urllib2.URLError as e:
        logging.warning("Failed to download tophub package for %s: %s",
                        backend, e)
        return False
Example #29
Source File: Tracker.py From plugin.video.netflix with MIT License | 5 votes |
def open(self, request):
    """Send *request*; return the response, or False on failure.

    A plain URLError (network unreachable) queues the request for a
    later retry via cache_request; an HTTPError does not.
    """
    try:
        return urlopen(request)
    except HTTPError:
        return False
    except URLError:
        self.cache_request(request)
        return False
Example #30
Source File: lambda_utils.py From aws-cidr-finder with Apache License 2.0 | 5 votes |
def send_response(event, context, response_status, reason=None, response_data=None):
    """PUT a CloudFormation custom-resource response to event["ResponseURL"].

    Args:
      event: CloudFormation request dict (StackId, RequestId,
          LogicalResourceId and ResponseURL keys are required).
      context: Lambda context; its log_stream_name is used as the
          PhysicalResourceId.
      response_status: "SUCCESS" or "FAILED".
      reason: optional human-readable explanation.
      response_data: optional dict attached as the response Data.
          (Fixed: the default was a shared mutable dict `{}`; None
          avoids the mutable-default pitfall with identical behavior.)

    Returns:
      True when the PUT succeeded, False on any HTTP/URL error.
    """
    body = {
        "Status": response_status,
        "PhysicalResourceId": context.log_stream_name,
        "StackId": event["StackId"],
        "RequestId": event["RequestId"],
        "LogicalResourceId": event["LogicalResourceId"],
    }
    print("Responding: {}".format(response_status))
    if reason:
        print(reason)
        body["Reason"] = reason
    if response_data:
        print(response_data)
        body["Data"] = response_data
    body = json.dumps(body).encode("utf-8")
    req = Request(event["ResponseURL"], data=body, headers={
        "Content-Length": len(body),
        "Content-Type": "",
    })
    # CloudFormation pre-signed URLs require a PUT, not the default POST.
    req.get_method = lambda: "PUT"
    try:
        urlopen(req)
        return True
    except HTTPError as e:
        print("Failed executing HTTP request: {}".format(e.code))
        return False
    except URLError as e:
        print("Failed to reach the server: {}".format(e.reason))
        return False