Python urllib2.install_opener() Examples

The following are 29 code examples of urllib2.install_opener(). Each example links to the project and source file it was taken from. You may also want to check out all available functions and classes of the urllib2 module.
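install_opener() replaces the module-global opener that urllib2.urlopen() uses for every subsequent request, so any handlers passed to build_opener() (proxies, cookies, authentication, redirect policies) take effect process-wide. A minimal sketch of the typical pattern (Python 2; the URL is a placeholder):

import urllib2

# Build an opener with the handlers you need, then install it globally.
# Every urllib2.urlopen() call after this point uses the installed opener.
opener = urllib2.build_opener(urllib2.HTTPHandler())
urllib2.install_opener(opener)

response = urllib2.urlopen('http://example.com/')  # placeholder URL
print response.read()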
Example #1
Source File: liberty_crawler.py    From agentless-system-crawler with Apache License 2.0
def retrieve_status_page(user, password, url):

    # Disable SSL certificate verification where supported (Python 2.7.9+).
    try:
        ssl._create_unverified_context
    except AttributeError:
        pass
    else:
        ssl._create_default_https_context = ssl._create_unverified_context

    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, url, user, password)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)

    req = urllib2.Request(url)
    try:
        response = urllib2.urlopen(req)
        return response.read()
    except Exception:
        raise CrawlError("can't access %s" % url)
Example #2
Source File: grabber.py    From ITWSV with MIT License
def getContent_GET(url,param,injection):
	"""
		Get the content of the url by GET method
	"""
	global log
	newUrl = url
	ret = None
	if url.find('?') < 0:
		if url[len(url)-1] != '/' and not allowedExtensions(url):
			url += '/'
		newUrl = url + '?' + param + '=' + single_urlencode(str(injection))
	else:
		newUrl = url + '&' + param + '=' + single_urlencode(str(injection))
	try:
		opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
		urllib2.install_opener(opener)
		log <= (newUrl)
		req = Request(newUrl, None, txheaders) # create a request object
		ret = urlopen(req)                     # and open it to return a handle on the url
	except HTTPError, e:
		log <= ('The server couldn\'t fulfill the request.')
		log <= ('Error code: %s' % e.code)
		return None
	return ret
Example #3
Source File: keepalive.py    From NoobSec-Toolkit with GNU General Public License v2.0
def error_handler(url):
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print "  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print "  EXCEPTION: %s" % e
            raise
        else:
            print "  status = %s, reason = %s" % (status, reason) 
Example #4
Source File: sosac.py    From plugin.video.sosac.ph with GNU General Public License v2.0
def probe_html5(self, result):

        class NoRedirectHandler(urllib2.HTTPRedirectHandler):

            def http_error_302(self, req, fp, code, msg, headers):
                infourl = urllib.addinfourl(fp, headers, req.get_full_url())
                infourl.status = code
                infourl.code = code
                return infourl
            http_error_300 = http_error_302
            http_error_301 = http_error_302
            http_error_303 = http_error_302
            http_error_307 = http_error_302

        opener = urllib2.build_opener(NoRedirectHandler())
        urllib2.install_opener(opener)

        r = urllib2.urlopen(urllib2.Request(result['url'], headers=result['headers']))
        if r.code == 200:
            result['url'] = r.read()
        return result 
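Because install_opener() swaps the opener for the whole process, the no-redirect handler above also affects every later urllib2.urlopen() call. A short sketch of restoring default redirect handling afterwards (an addition for illustration, not part of the original plugin):

# Re-install a default opener so subsequent requests follow redirects again.
urllib2.install_opener(urllib2.build_opener())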
Example #5
Source File: sport365.py    From bugatsinho.github.io with GNU General Public License v3.0
def getUrlrh(url, data=None, header={}, usecookies=True):
    cj = cookielib.LWPCookieJar()
    if usecookies:
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
    if not header:
        header = {'User-Agent':UA}
    rh={}
    req = urllib2.Request(url, data, headers=header)
    try:
        response = urllib2.urlopen(req, timeout=15)
        for k in response.headers.keys(): rh[k]=response.headers[k]
        link = response.read()
        response.close()
    except Exception:
        link = ''
    # Flatten the cookie jar into a 'name=value; ...' string (computed but not returned).
    c = '; '.join(['%s=%s' % (c.name, c.value) for c in cj]) if cj else ''
    return link, rh
Example #6
Source File: hsecscan.py    From hsecscan with GNU General Public License v2.0
def scan(url, redirect, insecure, useragent, postdata, proxy):
    request = urllib2.Request(url.geturl())
    request.add_header('User-Agent', useragent)
    request.add_header('Origin', 'http://hsecscan.com')
    request.add_header('Accept', '*/*')
    if postdata:
        request.add_data(urllib.urlencode(postdata))
    build = [urllib2.HTTPHandler()]
    if redirect:
        build.append(RedirectHandler())
    if proxy:
        build.append(urllib2.ProxyHandler({'http': proxy, 'https': proxy}))
    if insecure:
        context = ssl._create_unverified_context()
        build.append(urllib2.HTTPSHandler(context=context))
    urllib2.install_opener(urllib2.build_opener(*build))
    response = urllib2.urlopen(request)
    print '>> RESPONSE INFO <<'
    print_response(response.geturl(), response.getcode(), response.info())
    print '>> RESPONSE HEADERS DETAILS <<'
    for header in response.info().items():
        check_header(header)
    print '>> RESPONSE MISSING HEADERS <<'
    missing_headers(response.info().items(), url.scheme) 
Example #7
Source File: Youdao-Anki.py    From Anki-Youdao with MIT License
def totalPage(self):

        self.loadedCookies = self.loadCookies()
        if not self.loadedCookies:
            return False
        # page index start from 0 end at max-1
        req = urllib2.Request('http://dict.youdao.com/wordbook/wordlist?p=0&tags=')
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.loadedCookies))
        urllib2.install_opener(opener)
        response = urllib2.urlopen(req)
        source = response.read()
        if '密码错误' in source:  # '密码错误' means 'wrong password'
            return False
        else:
            try:
                # The link labelled '最后一页' ('last page') carries the maximum page index.
                return int(re.search('<a href="wordlist.p=(.*).tags=" class="next-page">最后一页</a>', source, re.M | re.I).group(1)) - 1
            except Exception:
                return 1 
Example #8
Source File: tomcat_crawler.py    From agentless-system-crawler with Apache License 2.0
def retrieve_status_page(hostname, port, user, password):
    statusPage = "http://%s:%s/manager/status?XML=true" % (hostname, port)

    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, statusPage, user, password)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)

    req = urllib2.Request(statusPage)
    try:
        response = urllib2.urlopen(req)
        return response.read()
    except Exception:
        raise CrawlError("can't access http://%s:%s" % (hostname, port))
Example #9
Source File: shellshock-hunter.py    From shellshock-hunter with GNU General Public License v2.0
def bing_search(query, key, offset, **kwargs):
    ''' Make the search '''
    username = ''  # the API expects an empty username; the key is used as the password
    baseURL = 'https://api.datamarket.azure.com/Bing/Search/'
    query = urllib.quote(query)
    user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)'
    credentials = (':%s' % key).encode('base64')[:-1]
    auth = 'Basic %s' % credentials
    url = baseURL+'Web?Query=%27'+query+'%27&$top=50&$format=json&$skip='+offset
    print '[*] Fetching '+url
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, url, username, key)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)
    try:
        readURL = urllib2.urlopen(url, timeout=60).read()
    except Exception as e:
        sys.exit('[-] Failed to fetch bing results. Are you sure you have the right API key?\n      Error: '+str(e))
    return readURL 
Example #10
Source File: prep.py    From dart with Apache License 2.0
def download_vcpython27(self):
        """
        Download vcpython27 since some Windows 7 boxes have it and some don't.
        :return: None
        """

        self._prepare_for_download()

        logger.info('Beginning download of vcpython27... this may take a few minutes...')

        with open(os.path.join(DOWNLOADS_DIR, 'vcpython27.msi'), 'wb') as f:

            if self.PROXY is not None:
                opener = urllib2.build_opener(
                    urllib2.HTTPHandler(),
                    urllib2.HTTPSHandler(),
                    urllib2.ProxyHandler({'http': self.PROXY, 'https': self.PROXY})
                )
                urllib2.install_opener(opener)

            f.write(urllib2.urlopen(self.VCPYTHON27_DOWNLOAD_URL, timeout=self.DOWNLOAD_TIMEOUT).read())

        logger.debug('Download of vcpython27 complete') 
Example #11
Source File: prep.py    From dart with Apache License 2.0
def download_python(self):
        """
        Download Python
        :return: None
        """

        self._prepare_for_download()

        logger.info('Beginning download of python')

        with open(os.path.join(DOWNLOADS_DIR, 'python-installer.msi'), 'wb') as f:

            if self.PROXY is not None:
                opener = urllib2.build_opener(
                    urllib2.HTTPHandler(),
                    urllib2.HTTPSHandler(),
                    urllib2.ProxyHandler({'http': self.PROXY, 'https': self.PROXY})
                )
                urllib2.install_opener(opener)

            f.write(urllib2.urlopen(self.PYTHON_DOWNLOAD_URL, timeout=self.DOWNLOAD_TIMEOUT).read())

        logger.debug('Download of python complete') 
Example #12
Source File: hackUtils.py    From fuzzdb-collect with GNU General Public License v3.0
def getUrlRespHtmlByProxy(url,proxy):
    respHtml=''
    try:
        heads = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 
                'Accept-Charset':'GB2312,utf-8;q=0.7,*;q=0.7', 
                'Accept-Language':'zh-cn,zh;q=0.5', 
                'Cache-Control':'max-age=0', 
                'Connection':'keep-alive', 
                'Keep-Alive':'115',
                'User-Agent':'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.14) Gecko/20110221 Ubuntu/10.10 (maverick) Firefox/3.6.14'}
        opener = urllib2.build_opener(urllib2.ProxyHandler({'https':proxy}))
        urllib2.install_opener(opener) 
        req = urllib2.Request(url)
        opener.addheaders = heads.items()
        respHtml = opener.open(req).read()
    except Exception:
        pass
    return respHtml 
Example #13
Source File: grabber.py    From ITWSV with MIT License
def getContent_POST(url,param,injection):
	"""
		Get the content of the url by POST method
	"""
	global log
	txdata = urllib.urlencode({param: injection})
	ret = None
	try:
		opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
		urllib2.install_opener(opener)
		log <= (url)
		log <= (txdata)
		req = Request(url, txdata, txheaders)  # create a request object
		ret = urlopen(req)                     # and open it to return a handle on the url
	except HTTPError, e:
		log <= ('The server couldn\'t fulfill the request.')
		log <= ('Error code: %s' % e.code)
		return None
	return ret
Example #14
Source File: url_request.py    From Belati with GNU General Public License v2.0
def just_url_open(self, url_request, proxy_address):
        try:
            if type(proxy_address) is list:
                # Get random proxy from list
                proxy_address_fix = random.choice(proxy_address)
            else:
                proxy_address_fix = proxy_address

            if proxy_address != "":
                log.console_log("{}[*] Using Proxy Address : {}{}".format(Y, proxy_address_fix, W))

            parse = urlparse(proxy_address_fix)
            proxy_scheme = parse.scheme
            proxy = str(parse.hostname) + ':' + str(parse.port)
            proxy_handler = urllib2.ProxyHandler({ proxy_scheme: proxy})
            opener = urllib2.build_opener(proxy_handler)
            opener.addheaders = [('User-agent', ua.get_user_agent() )]
            urllib2.install_opener(opener)
            req = urllib2.Request(url_request)
            data = urllib2.urlopen(req, timeout=25)
            return data
        except urllib2.HTTPError, e:
            return e.code
Example #15
Source File: url_request.py    From Belati with GNU General Public License v2.0
def header_info(self, url_request, proxy_address):
        try:
            if type(proxy_address) is list:
                # Get random proxy from list
                proxy_address_fix = random.choice(proxy_address)
            else:
                proxy_address_fix = proxy_address

            if proxy_address != "":
                log.console_log("{}[*] Using Proxy Address : {}{}".format(Y, proxy_address_fix, W))

            parse = urlparse(proxy_address_fix)
            proxy_scheme = parse.scheme
            proxy = str(parse.hostname) + ':' + str(parse.port)
            proxy_handler = urllib2.ProxyHandler({ proxy_scheme: proxy})
            opener = urllib2.build_opener(proxy_handler)
            opener.addheaders = [('User-agent', ua.get_user_agent() )]
            urllib2.install_opener(opener)
            req = urllib2.Request(url_request)
            data = urllib2.urlopen(req).info()
            return data
        except urllib2.HTTPError, e:
            log.console_log('Error code: {}'.format( str(e.code)))
            return e.code 
Example #16
Source File: Belati.py    From Belati with GNU General Public License v2.0
def check_single_proxy_status(self, proxy_address, domain_check):
        try:
            parse = urlparse(proxy_address)
            proxy_scheme = parse.scheme
            proxy = str(parse.hostname) + ':' + str(parse.port)
            proxy_handler = urllib2.ProxyHandler({ proxy_scheme: proxy})
            opener = urllib2.build_opener(proxy_handler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36')]
            urllib2.install_opener(opener)
            req = urllib2.Request(domain_check)
            start_time = time.time()
            sock = urllib2.urlopen(req)
            end_time = time.time()
            diff_time = round(end_time - start_time, 3)
            log.console_log("{}[+] {} OK! Response Time : {}s{}".format(Y, proxy_address, str(diff_time), W))
            return 'ok'
        except urllib2.HTTPError, e:
            print('Error code: ' + str(e.code))
            return e.code 
Example #17
Source File: utils.py    From WeixinBot with Apache License 2.0
def set_cookie(cookie_file):
    """
    @brief      Load cookie from file
    @param      cookie_file
    @return     cookie, LWPCookieJar
    """
    cookie = cookielib.LWPCookieJar(cookie_file)
    try:
        cookie.load(ignore_discard=True)
    except Exception:
        Log.error(traceback.format_exc())
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
    opener.addheaders = Constant.HTTP_HEADER_USERAGENT
    urllib2.install_opener(opener)
    return cookie 
Example #18
Source File: grabber.py    From ITWSV with MIT License
def getContentDirectURL_POST(url,allParams):
	"""
		Get the content of the url by POST method
	"""
	global log
	txdata = urllib.urlencode(allParams)
	ret = None
	try:
		opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
		urllib2.install_opener(opener)
		log <= (url)
		log <= (txdata)
		req = Request(url, txdata, txheaders)  # create a request object
		ret = urlopen(req)                     # and open it to return a handle on the url
	except HTTPError, e:
		log <= ('The server couldn\'t fulfill the request.')
		log <= ('Error code: %s' % e.code)
		return None
	return ret
Example #19
Source File: grabber.py    From ITWSV with MIT License
def getContentDirectURL_GET(url, string):
	"""
		Get the content of the url by GET method
	"""
	global log
	ret = ""
	try:
		if len(string) > 0:
			if url[len(url)-1] != '/' and url.find('?') < 0 and not allowedExtensions(url):
				url += '/'
			url = url + "?" + (string)
		opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
		urllib2.install_opener(opener)
		log <= (url)
		req = Request(url, None, txheaders)    # create a request object
		ret = urlopen(req)                     # and open it to return a handle on the url
	except HTTPError, e:
		log <= ('The server couldn\'t fulfill the request.')
		log <= ('Error code: %s' % e.code)
		return None
	return ret
Example #20
Source File: hackUtils.py    From fuzzdb-collect with GNU General Public License v3.0
def getUrlRespHtml(url):
    respHtml=''
    try:
        heads = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 
                'Accept-Charset':'GB2312,utf-8;q=0.7,*;q=0.7', 
                'Accept-Language':'zh-cn,zh;q=0.5', 
                'Cache-Control':'max-age=0', 
                'Connection':'keep-alive', 
                'Keep-Alive':'115',
                'User-Agent':'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.14) Gecko/20110221 Ubuntu/10.10 (maverick) Firefox/3.6.14'}
     
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
        urllib2.install_opener(opener) 
        req = urllib2.Request(url)
        opener.addheaders = heads.items()
        respHtml = opener.open(req).read()
    except Exception:
        pass
    return respHtml 
Example #21
Source File: urllib2.py    From oss-ftp with MIT License
def install_opener(opener):
    global _opener
    _opener = opener

# do these error classes make sense?
# make sure all of the IOError stuff is overridden.  we just want to be
# subtypes. 
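As the source above shows, install_opener() merely rebinds the module-global _opener that urllib2.urlopen() consults. When global state is unwanted, an equivalent explicit style is to call the opener directly; a sketch (the URL is a placeholder):

opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
response = opener.open('http://example.com/')  # same effect, no global side effect
print response.read()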
Example #22
Source File: test_urllib2_localnet.py    From oss-ftp with MIT License
def test_basic_auth_httperror(self):
        ah = urllib2.HTTPBasicAuthHandler()
        ah.add_password(self.REALM, self.server_url, self.USER,
                        self.INCORRECT_PASSWD)
        urllib2.install_opener(urllib2.build_opener(ah))
        self.assertRaises(urllib2.HTTPError, urllib2.urlopen, self.server_url) 
Example #23
Source File: addon.py    From Kodi with GNU Lesser General Public License v3.0
def _cookies_init(self):
        if self.CJ is None:
            return

        urllib2.install_opener(
            urllib2.build_opener(
                urllib2.HTTPCookieProcessor(self.CJ)
            )
        )

        self.cookie_path = os.path.join(self.path, 'cookies')
        if not os.path.exists(self.cookie_path):
            os.makedirs(self.cookie_path)
            # print '[%s]: os.makedirs(cookie_path=%s)' % (addon_id, cookie_path) 
Example #24
Source File: app-tool.py    From viewfinder with Apache License 2.0
def __init__(self):
    self.cookie_jar = cookielib.LWPCookieJar()
    urllib2.install_opener(
        urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar)))
    self.clients = {} 
Example #25
Source File: urllib2.py    From BinderFilter with MIT License
def install_opener(opener):
    global _opener
    _opener = opener

# do these error classes make sense?
# make sure all of the IOError stuff is overridden.  we just want to be
# subtypes. 
Example #26
Source File: vtLookup.py    From ThreatHunting with MIT License
def lookup(md5):
  try:
    proxy = urllib2.ProxyHandler({'http': 'proxy:port'})
    opener = urllib2.build_opener(proxy)
    urllib2.install_opener(opener)
    response = urllib2.urlopen('https://www.virustotal.com/vtapi/v2/file/report','apikey=<VTKEY>&resource=' + md5)
    lines = response.read()
    return lines
  except Exception:
    return ''
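Note that passing a data string as the second argument to urllib2.urlopen(), as above, turns the request into a POST with that string as the body. A tiny sketch (URL and payload values are placeholders):

import urllib, urllib2

data = urllib.urlencode({'apikey': 'KEY', 'resource': 'HASH'})  # placeholder values
response = urllib2.urlopen('https://api.example.com/report', data)  # POST, because data is given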
Example #27
Source File: logger_base.py    From loggers with GNU General Public License v2.0
def do_login(self):
        # set cookie
        cj = cookielib.CookieJar()
        url_login = self.url_login
        form_data = self.form_data_dict
        try:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            urllib2.install_opener(opener)
            req = urllib2.Request(url_login, urllib.urlencode(form_data))
            u = urllib2.urlopen(req)
            return u.read().decode('utf-8').encode('gbk')
        except Exception:
            print "Oops! Failed to log in >_< there may be a problem."
            return
Example #28
Source File: urllib2.py    From Computable with MIT License
def install_opener(opener):
    global _opener
    _opener = opener

# do these error classes make sense?
# make sure all of the IOError stuff is overridden.  we just want to be
# subtypes. 
Example #29
Source File: test_urllib2_localnet.py    From BinderFilter with MIT License
def setUp(self):
        proxy_handler = urllib2.ProxyHandler({})
        opener = urllib2.build_opener(proxy_handler)
        urllib2.install_opener(opener)
        super(TestUrlopen, self).setUp()