Python urllib.request.install_opener() Examples

The following are 20 code examples of urllib.request.install_opener(), collected from open source projects. The project and license for each example are noted above it. You may also want to check out the other available functions and classes of the urllib.request module.
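install_opener() replaces the process-wide default opener used by urlopen() and urlretrieve(). A minimal sketch of the typical pattern, using a placeholder URL:

from urllib import request

# build an opener with whatever handlers are needed, then make it the
# process-wide default used by urlopen() and urlretrieve()
opener = request.build_opener(request.ProxyHandler({}))  # e.g. disable all proxies
opener.addheaders = [("User-agent", "Mozilla/5.0")]
request.install_opener(opener)

with request.urlopen("https://example.com/") as response:  # placeholder URL
    body = response.read()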
Example #1
Source File: download.py    From cornac with Apache License 2.0
def _urlretrieve(url, fpath):
    """Retrieve data from given url

    Parameters
    ----------
    url: str
        The url to the data.

    fpath: str
        The path to file where data is stored.

    """
    opener = request.build_opener()
    opener.addheaders = [("User-agent", "Mozilla/5.0")]

    with tqdm(unit="B", unit_scale=True) as progress:

        def report(chunk, chunksize, total):
            progress.total = total
            progress.update(chunksize)

        request.install_opener(opener)
        request.urlretrieve(url, fpath, reporthook=report) 
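Since install_opener() mutates global state, the custom User-Agent above also affects every later urlopen()/urlretrieve() call in the process. A minimal sketch of restoring the default behaviour afterwards:

from urllib import request

# installing a fresh opener with no extra handlers restores the defaults
request.install_opener(request.build_opener())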
Example #2
Source File: common.py    From acmpv with Do What The F*ck You Want To Public License
def get_response(url, faker = False):
    logging.debug('get_response: %s' % url)

    # install cookies
    if cookies:
        opener = request.build_opener(request.HTTPCookieProcessor(cookies))
        request.install_opener(opener)

    if faker:
        response = request.urlopen(request.Request(url, headers = fake_headers), None)
    else:
        response = request.urlopen(url)

    data = response.read()
    if response.info().get('Content-Encoding') == 'gzip':
        data = ungzip(data)
    elif response.info().get('Content-Encoding') == 'deflate':
        data = undeflate(data)
    response.data = data
    return response

# DEPRECATED in favor of get_content() 
Example #3
Source File: py3.py    From php-load-test with Do What The F*ck You Want To Public License
def check_php_multipartform_dos(url, post_body, headers, ip):
    try:
        proxy_handler = urllib2.ProxyHandler({"http": ip})
        null_proxy_handler = urllib2.ProxyHandler({})
        opener = urllib2.build_opener(proxy_handler)
        urllib2.install_opener(opener)
        req = urllib2.Request(url)
        for key in headers.keys():
            req.add_header(key, headers[key])
        starttime = datetime.datetime.now()
        fd = urllib2.urlopen(req, post_body)
        html = fd.read()
        endtime = datetime.datetime.now()
        usetime = (endtime - starttime).seconds
        if usetime > 5:
            result = url + " is vulnerable"
        elif usetime > 3:
            result = "need to check normal respond time"
        else:
            # without an else branch, `result` would be unbound for fast
            # responses and the return below would raise UnboundLocalError
            result = url + " is not vulnerable"
        return [result, usetime]
    except KeyboardInterrupt:
        exit()
# end 
Example #4
Source File: common.py    From acmpv with Do What The F*ck You Want To Public License
def set_http_proxy(proxy):
    if proxy is None: # Use system default setting
        proxy_support = request.ProxyHandler()
    elif proxy == '': # Don't use any proxy
        proxy_support = request.ProxyHandler({})
    else: # Use proxy
        proxy_support = request.ProxyHandler({'http': '%s' % proxy, 'https': '%s' % proxy})
    opener = request.build_opener(proxy_support)
    request.install_opener(opener) 
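Illustrative calls of the helper above (the proxy address is a placeholder): None keeps the system proxy settings, an empty string disables proxies, and any other value is used for both http and https.

set_http_proxy(None)              # use the system default proxy settings
set_http_proxy('')                # bypass proxies entirely
set_http_proxy('127.0.0.1:8080')  # placeholder proxy for both http and https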
Example #5
Source File: convertor.py    From pyp2rpm with MIT License
def client(self):
        """XMLRPC client for PyPI. Always returns the same instance.

        If the package is provided as a path to compressed source file,
        PyPI will not be used and the client will not be instantiated.

        Returns:
            XMLRPC client for PyPI or None.
        """
        if not hasattr(self, '_client'):
            transport = None
            if self.pypi:
                if self.proxy:
                    logger.info('Using provided proxy: {0}.'.format(
                        self.proxy))
                    proxyhandler = urllib.ProxyHandler({"http": self.proxy})
                    opener = urllib.build_opener(proxyhandler)
                    urllib.install_opener(opener)
                    # create the proxy-aware transport here, after
                    # `transport = None`, so it is actually passed to
                    # ServerProxy (the original assigned it earlier and
                    # then overwrote it with None)
                    transport = ProxyTransport()
                self._client = xmlrpclib.ServerProxy(settings.PYPI_URL,
                                                     transport=transport)
                self._client_set = True
            else:
                self._client = None

        return self._client 
Example #6
Source File: instabrute.py    From instabrute with GNU General Public License v3.0
def get_csrf():
    """
    get CSRF token from login page to use in POST requests
    """
    global csrf_token

    print(bcolors.WARNING + "[+] Getting CSRF Token: " + bcolors.ENDC)

    try:
        opener = rq.build_opener(rq.HTTPHandler(), rq.HTTPSHandler())
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        rq.install_opener(opener)

        request = rq.Request('https://www.instagram.com/')
        try:
            # python 2
            headers = rq.urlopen(request).info().headers
        except Exception:
            # python 3
            headers = rq.urlopen(request).info().get_all('Set-Cookie')

        for header in headers:
            if header.find('csrftoken') != -1:
                csrf_token = header.partition(';')[0].partition('=')[2]
                print(bcolors.OKGREEN + "[+] CSRF Token :", csrf_token, "\n" + bcolors.ENDC)
    except Exception as err:
        print(bcolors.FAIL + "[!] Can't get CSRF token , please use -d for debug" + bcolors.ENDC)

        if _debug:
            logger.error(err)

        print(bcolors.FAIL + "[!] Exiting..." + bcolors.ENDC)
        exit(3) 
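For comparison, a sketch of an alternative approach (assuming Python 3) that lets http.cookiejar parse the Set-Cookie headers instead of splitting them by hand:

from http.cookiejar import CookieJar
from urllib import request as rq

jar = CookieJar()
opener = rq.build_opener(rq.HTTPCookieProcessor(jar))
rq.install_opener(opener)
rq.urlopen('https://www.instagram.com/')
# the jar now holds any cookies the server set on this response
csrf_token = next((c.value for c in jar if c.name == 'csrftoken'), None)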
Example #7
Source File: instabrute.py    From instabrute with GNU General Public License v3.0
def check_proxy(q):
    """
    check proxy for and append to working proxies
    :param q:
    """
    if not q.empty():

        proxy = q.get(False)
        proxy = proxy.replace("\r", "").replace("\n", "")

        try:
            opener = rq.build_opener(
                rq.ProxyHandler({'https': 'https://' + proxy}),
                rq.HTTPHandler(),
                rq.HTTPSHandler()
            )

            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
            rq.install_opener(opener)

            req = rq.Request('https://api.ipify.org/')

            if rq.urlopen(req).read().decode() == proxy.partition(':')[0]:
                proxys_working_list.update({proxy: proxy})
                if _verbose:
                    print(bcolors.OKGREEN + " --[+] ", proxy, " | PASS" + bcolors.ENDC)
            else:
                if _verbose:
                    print(" --[!] ", proxy, " | FAILED")

        except Exception as err:
            if _verbose:
                print(" --[!] ", proxy, " | FAILED")
            if _debug:
                logger.error(err)
            pass 
Example #8
Source File: main.py    From bili-box with MIT License
def download(self, url):
        print(url)
        if not url:
            return
        ssl._create_default_https_context = ssl._create_unverified_context
        opener = urequest.build_opener()
        opener.addheaders = [
            ("Host", "tx.acgvideo.com"),
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
            ),
            ("Accept", "*/*"),
            ("Accept-Language", "en-US,en;q=0.5"),
            ("Accept-Encoding", "gzip, deflate, br"),
            ("Range", "bytes=0-"),  # Range 的值要为 bytes=0- 才能下载完整视频
            ("Referer", "https://www.bilibili.com/video/av14543079/"),
            ("Origin", "https://www.bilibili.com"),
            ("Connection", "keep-alive"),
        ]
        urequest.install_opener(opener)

        folder = self.dlpath + "/" + url[0].split("?")[0].split("/")[-1].split("-")[0]
        for i, j in enumerate(url):
            filename = j.split("?")[0].split("/")[-1]
            print(f"path: {folder}/{filename}\n")
            if not os.path.exists(folder):
                os.mkdir(folder)
            if os.path.exists(f"{folder}/{filename}"):
                self.updateProgress.emit(int(threading.current_thread().name), 100)
            else:
                urequest.urlretrieve(
                    j, filename=f"{folder}/{filename}", reporthook=self.report
                )
            self.updateSlice.emit(int(threading.current_thread().name), str(i + 1))
        if len(url) > 1:
            self.merge(folder)
        else:
            self.change2mp4(folder) 
Example #9
Source File: setup.py    From OasisLMF with BSD 3-Clause "New" or "Revised" License
def fetch_ktools_tar(self, location, url, attempts=3, timeout=15, cooldown=1):
        last_error = None
        proxy_config = urlrequest.getproxies()
        self.announce('Retrieving ktools from: {}'.format(url), INFO)
        self.announce('Proxy configuration: {}'.format(proxy_config), INFO)

        if proxy_config:
            # Handle Proxy config
            proxy_handler = urlrequest.ProxyHandler(proxy_config)
            opener = urlrequest.build_opener(proxy_handler)
            urlrequest.install_opener(opener)

        for i in range(attempts):
            try:
                if proxy_config:
                    # Proxied connection
                    req = urlrequest.urlopen(urlrequest.Request(url), timeout=timeout)
                    break
                else:
                    # Non proxied connection
                    req = urlrequest.urlopen(url, timeout=timeout)
                    break

            except URLError as e:
                self.announce('Fetch ktools tar failed: {} (attempt {})'.format(e, (i+1)), WARN)
                last_error = e
                sleep(cooldown)
        else:
            self.announce('Failed to get ktools tar after {} attempts'.format(attempts), ERROR)
            if last_error:
                raise last_error

        with open(location, 'wb') as f:
            f.write(req.read()) 
Example #10
Source File: 刷网页.py    From Python-Code with MIT License
def brash(proxy_dict):
    #print(proxy_dict)
    global count
    global count1
    if count1 < 100:
        try:  # normal path
            count = count + 1
            print(count, 'times')  # show the program is still running and how many requests were made
            proxy_handler = request.ProxyHandler({'http': proxy_dict})
            opener = request.build_opener(proxy_handler)
            request.install_opener(opener)
            countUrl = len(url)
            for i in range(countUrl):  # iterate over all URLs
                req = request.Request(url[i], headers=head, method='POST')
                try:
                    #lock.acquire()
                    response = request.urlopen(req)  # fetch the page
                    html = response.read().decode('utf-8')
                    print(html)
                    #lock.release()
                except urllib.error.URLError as e:
                    print(e.reason)
                    print("EEEEEE")
            #time.sleep(1)  # pause between rounds

        except Exception:  # an exception occurred
            print('Retry')
            count1 = count1 + 1
            time.sleep(1)  # pause before retrying
    else:
        print('much error') 
Example #11
Source File: common.py    From acmpv with Do What The F*ck You Want To Public License
def unset_proxy():
    proxy_handler = request.ProxyHandler({})
    opener = request.build_opener(proxy_handler)
    request.install_opener(opener)

# DEPRECATED in favor of set_proxy() and unset_proxy() 
Example #12
Source File: common.py    From acmpv with Do What The F*ck You Want To Public License
def set_proxy(proxy):
    proxy_handler = request.ProxyHandler({
        'http': '%s:%s' % proxy,
        'https': '%s:%s' % proxy,
    })
    opener = request.build_opener(proxy_handler)
    request.install_opener(opener) 
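Note that set_proxy() expects a (host, port) tuple, since '%s:%s' % proxy unpacks exactly two values. An illustrative call with a placeholder address:

set_proxy(('127.0.0.1', 8118))  # installs 127.0.0.1:8118 for http and https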
Example #13
Source File: webwxapi.py    From WxRobot with MIT License
def __init__(self):
        self.DEBUG = False
        self.appid = 'wx782c26e4c19acffb'
        self.uuid = ''
        self.base_uri = ''
        self.redirect_uri = ''
        self.uin = ''
        self.sid = ''
        self.skey = ''
        self.pass_ticket = ''
        self.deviceId = 'e' + repr(random.random())[2:17]
        self.BaseRequest = {}
        self.synckey = ''
        self.SyncKey = []
        self.User = []
        self.MemberList = []
        self.ContactList = []
        self.GroupList = []
        self.autoReplyMode = False
        self.syncHost = ''

        self._handlers = dict((k, []) for k in self.message_types)
        self._handlers['location'] = []
        self._handlers['all'] = []

        self._filters = dict()

        opener = request.build_opener(request.HTTPCookieProcessor(CookieJar()))
        opener.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'),
                             ('Referer','https://wx2.qq.com/')]
        request.install_opener(opener) 
Example #14
Source File: IpGeoLocationLib.py    From IPGeoLocation with GNU General Public License v3.0
def __configureProxy(self, proxy):
        #proxy = self.__checkProxyUrl(proxy)
        #if not proxy:
        #    raise MyExceptions.InvalidProxyUrlError()
        
        self.Utils.checkProxyConn(self.URL, proxy.netloc)
        self.Proxy = proxy
        proxyHandler = request.ProxyHandler({'http':proxy.scheme + '://' + proxy.netloc})
        opener = request.build_opener(proxyHandler)
        request.install_opener(opener)
        self.Logger.Print('Proxy ({}) has been configured.'.format(proxy.scheme + '://' + proxy.netloc)) 
Example #15
Source File: Config.py    From watchdog with Apache License 2.0
def getFile(cls, getfile, unpack=True):
        handlers = []
        if cls.getProxy():
            handlers.append(req.ProxyHandler({'http': cls.getProxy(), 'https': cls.getProxy()}))
            handlers.append(req.HTTPBasicAuthHandler())
        if cls.ignoreCerts():
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            handlers.append(req.HTTPSHandler(context=ctx))
        if handlers:
            # build a single opener so that the proxy settings are not
            # discarded when certificate checks are also disabled (the
            # original code called install_opener() twice, and the second
            # call replaced the proxy-aware opener)
            req.install_opener(req.build_opener(*handlers))

        response = req.urlopen(getfile)
        data = response
        # TODO: if data == text/plain; charset=utf-8, read and decode
        if unpack:
            content_type = response.info().get('Content-Type', '')
            if 'gzip' in content_type:
                buf = BytesIO(response.read())
                data = gzip.GzipFile(fileobj=buf)
            elif 'bzip2' in content_type:
                data = BytesIO(bz2.decompress(response.read()))
            elif 'zip' in content_type:
                fzip = zipfile.ZipFile(BytesIO(response.read()), 'r')
                if len(fzip.namelist()) > 0:
                    data = BytesIO(fzip.read(fzip.namelist()[0]))
        return (data, response)


    # Feeds 
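When this kind of global side effect is undesirable, the same handlers can be used without install_opener() by calling the opener directly; a minimal sketch with a placeholder proxy:

from urllib import request

opener = request.build_opener(request.ProxyHandler({'http': 'http://127.0.0.1:8080'}))
response = opener.open('https://example.com/')  # only this call goes through the proxy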
Example #16
Source File: url.py    From fusesoc with BSD 2-Clause "Simplified" License
def _checkout(self, local_dir):
        url = self.config.get("url")
        logger.info("Downloading...")
        user_agent = self.config.get("user-agent")
        if not self.config.get("verify_cert", True):
            import ssl

            ssl._create_default_https_context = ssl._create_unverified_context

        if user_agent and sys.version_info[0] >= 3:
            opener = urllib.build_opener()
            opener.addheaders = [("User-agent", user_agent)]
            urllib.install_opener(opener)
        try:
            (filename, headers) = urllib.urlretrieve(url)
        except (URLError, HTTPError) as e:
            raise RuntimeError("Failed to download '{}'. '{}'".format(url, e.reason))

        filetype = self.config.get("filetype")
        if filetype == "tar":
            t = tarfile.open(filename)
            t.extractall(local_dir)
        elif filetype == "zip":
            with zipfile.ZipFile(filename, "r") as z:
                z.extractall(local_dir)
        elif filetype == "simple":
            _filename = url.rsplit("/", 1)[1]
            os.makedirs(local_dir)
            shutil.copy2(filename, os.path.join(local_dir, _filename))
        else:
            raise RuntimeError(
                "Unknown file type '" + filetype + "' in [provider] section"
            ) 
Example #17
Source File: __init__.py    From pyhanlp with Apache License 2.0
def download(url, path):
    opener = urllib.build_opener()
    opener.addheaders = [('User-agent',
                          'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36')]
    urllib.install_opener(opener)
    if os.path.isfile(path):
        print('Using local {}, ignoring {}'.format(path, url))
        return True
    else:
        print('Downloading {} to {}'.format(url, path))
        tmp_path = '{}.downloading'.format(path)
        remove_file(tmp_path)
        try:
            def reporthook(count, block_size, total_size):
                global start_time, progress_size
                if count == 0:
                    start_time = time.time()
                    progress_size = 0
                    return
                duration = time.time() - start_time
                duration = max(1e-8, duration)  # guard against division by zero
                progress_size = int(count * block_size)
                if progress_size > total_size:
                    progress_size = total_size
                speed = int(progress_size / (1024 * duration))
                ratio = progress_size / total_size
                ratio = max(1e-8, ratio)
                percent = ratio * 100
                eta = duration / ratio * (1 - ratio)
                minutes = eta / 60
                seconds = eta % 60
                sys.stdout.write("\r%.2f%%, %d MB, %d KB/s, %d min %2d sec remaining   " %
                                 (percent, progress_size / (1024 * 1024), speed, minutes, seconds))
                sys.stdout.flush()

            import socket
            socket.setdefaulttimeout(10)
            urllib.urlretrieve(quote(url, safe='/:?='), tmp_path, reporthook)
            print()
        except BaseException as e:
            eprint('Download of {} failed because of {}'.format(url, repr(e)))
            doc_url = 'https://github.com/hankcs/pyhanlp'
            eprint('Please refer to %s for manual installation.' % doc_url)
            eprint('Or download {} manually and move it to {}'.format(url, path))
            if os.path.isfile(tmp_path):
                os.remove(tmp_path)
            browser_open(doc_url)
            exit(1)
        remove_file(path)
        os.rename(tmp_path, path)
    return True 
Example #18
Source File: request.py    From Atlas with GNU General Public License v3.0
def send(self,url,method='GET',data=None,headers={}):
		agent    = self.kwargs['agent']
		proxy    = self.kwargs['proxy']
		cookie   = self.kwargs['cookie']
		timeout  = self.kwargs['timeout']
		headers_ = self.kwargs['headers'] if self.kwargs['headers'] == headers else headers
		redirect = self.kwargs['allow-redirect']
		# -- process -- # 
		if method:method = method.upper()
		if data is None: data = {}
		# -- disable ssl check -- *
		ctx = ssl.create_default_context()                                                                      
		ctx.check_hostname = False                                                                              
		ctx.verify_mode = ssl.CERT_NONE
		# -- add headers -- #
		headers = {}
		headers['User-Agent'] = agent
		for header in headers_.items():
			headers[header[0]] = header[1]
		# -- //
		if cookie:headers['Cookie'] = cookie
		# -- socket timeout -- #
		if timeout:socket.setdefaulttimeout(timeout)
		# -- handled http and https -- #
		handlers = [urllib2.HTTPHandler(),urllib2.HTTPSHandler(context=ctx)]
		# -- process redirect -- #
		if redirect is False:handlers.append(NoRedirectHandler)
		# -- process proxies -- # 
		if proxy:
			handlers.append(urllib2.ProxyHandler({
				'http' : proxy,
				'https': proxy
				})
			)
		# -- install opener -- #
		opener = urllib2.build_opener(*handlers)
		urllib2.install_opener(opener)
		# -- process request -- #
		if method.lower() == "get":
			if data: url = get_params(url,data)
			req = urllib2.Request(url,headers=headers)
		elif method.lower() == "post":
			req = urllib2.Request(url,data=data,headers=headers)
		# -- urlopen -- #
		try:
			resp = urllib2.urlopen(req)
		except urllib2.HTTPError as e:
			resp = e 
		return Resp(resp) 
Example #19
Source File: Navigation.py    From plugin.video.netflix with MIT License
def call_netflix_service(self, params):
        """
        Makes a GET request to the internal Netflix HTTP proxy
        and returns the result

        Parameters
        ----------
        params : :obj:`dict` of  :obj:`str`
            List of parameters to be url encoded

        Returns
        -------
        :obj:`dict`
            Netflix Service RPC result
        """
        cache = params.pop('cache', None)
        values = urlencode(params)
        # check for cached items
        if cache:
            cached_value = self.kodi_helper.get_cached_item(
                cache_id=values)

            # Cache lookup successful?
            if cached_value is not None:
                self.log(
                    msg='Fetched item from cache: (cache_id=' + values + ')')
                return cached_value

        url = self.get_netflix_service_url()
        full_url = url + '?' + values
        # don't use proxy for localhost
        if urlparse(url).hostname in ('localhost', '127.0.0.1', '::1'):
            opener = Request.build_opener(Request.ProxyHandler({}))
            Request.install_opener(opener)
        data = Request.urlopen(full_url).read()
        parsed_json = json.loads(data, object_pairs_hook=OrderedDict)
        if 'error' in parsed_json:
            result = {'error': parsed_json.get('error')}
            return result
        result = parsed_json.get('result', None)
        if result and cache:
            self.log(msg='Adding item to cache: (cache_id=' + values + ')')
            self.kodi_helper.add_cached_item(cache_id=values, contents=result)
        return result 
Example #20
Source File: client.py    From bugatsinho.github.io with GNU General Public License v3.0
def cfcookie(netloc, ua, timeout):
    try:
        headers = {'User-Agent': ua}

        req = urllib2.Request(netloc, headers=headers)

        try:
            urllib2.urlopen(req, timeout=int(timeout))
        except urllib2.HTTPError as response:
            # Cloudflare serves the challenge page with an HTTP error
            # status, so the page body is read from the HTTPError itself
            result = response.read(5242880)

        jschl = re.findall('name="jschl_vc" value="(.+?)"/>', result)[0]

        init = re.findall('setTimeout\(function\(\){\s*.*?.*:(.*?)};', result)[-1]

        builder = re.findall(r"challenge-form\'\);\s*(.*)a.v", result)[0]

        decryptVal = parseJSString(init)

        lines = builder.split(';')

        for line in lines:

            if len(line) > 0 and '=' in line:

                sections = line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal) + str(sections[0][-1]) + str(line_val)))

        answer = decryptVal + len(urlparse.urlparse(netloc).netloc)

        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (netloc, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval = re.findall('name="pass" value="(.*?)"', result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (
                netloc, quote_plus(passval), jschl, answer
            )
            time.sleep(5)

        cookies = cookielib.LWPCookieJar()
        handlers = [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
        opener = urllib2.build_opener(*handlers)
        urllib2.install_opener(opener)

        try:
            req = urllib2.Request(query, headers=headers)
            urllib2.urlopen(req, timeout=int(timeout))
        except BaseException:
            pass

        cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])

        return cookie
    except BaseException:
        pass