Python urllib2.unquote() Examples
The following are 30 code examples of urllib2.unquote().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module urllib2, or try the search function.
Example #1
Source File: bullyblinder.py From DATA with GNU General Public License v3.0 | 6 votes |
def obfuscation_unescape(page):
    """Decode `unescape(...)`-obfuscated <script> blocks in *page*, write the
    cleaned document to a temp file and reopen it through the global browser.

    Returns the browser response on success, False on any failure to reopen.
    Side effect: sets the module-global `using_selenium` flag.
    """
    soup = BeautifulSoup(page, "lxml")
    for scr in soup(["script"]):
        if re.search('unescape', str(scr), re.IGNORECASE):
            # Percent-escaped payload inside the script tag.
            encoded = re.search("(?:%[0-9A-F][0-9A-F][^\"]+)", str(scr), re.IGNORECASE)
            # BUG FIX: guard against scripts that mention "unescape" but carry
            # no %XX payload -- encoded.group(0) crashed on None before.
            if encoded:
                decoded_content = urllib2.unquote(encoded.group(0))
                scr.replace_with(decoded_content)
    decoded_page = soup.decode(formatter=None)
    tmp_file = "/tmp/tmp.html"
    # BUG FIX: removed the redundant temp_f.close() inside the `with` block;
    # the context manager already closes the file.
    with open(tmp_file, "wb") as temp_f:
        temp_f.write(decoded_page)
    try:
        response = br.open('file://' + tmp_file)
        global using_selenium
        using_selenium = True
        return response
    except Exception:
        return False
Example #2
Source File: cpacker.py From plugin.video.bimozie with GNU General Public License v3.0 | 6 votes |
def _cleanstr(self, str): str = str.strip() if str.find("function") == 0: pattern = (r"=\"([^\"]+).*}\s*\((\d+)\)") args = re.search(pattern, str, re.DOTALL) if args: a = args.groups() def openload_re(match): c = match.group(0) b = ord(c) + int(a[1]) return chr(b if (90 if c <= "Z" else 122) >= b else b - 26) str = re.sub(r"[a-zA-Z]", openload_re, a[0]); str = urllib2.unquote(str) elif str.find("decodeURIComponent") == 0: str = re.sub(r"(^decodeURIComponent\s*\(\s*('|\"))|(('|\")\s*\)$)", "", str); str = urllib2.unquote(str) elif str.find("\"") == 0: str = re.sub(r"(^\")|(\"$)|(\".*?\")", "", str); elif str.find("'") == 0: str = re.sub(r"(^')|('$)|('.*?')", "", str); return str
Example #3
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def parseucaster(self, url, referer,options):
    """Resolve a ucaster stream: fetch *url* with spoofed Referer/User-Agent,
    pull the '"file": "..."' value and build an rtmp command line.

    Returns the rtmp string on success, False when no file entry is found.
    """
    print ("a", url, referer,options)
    req = urllib2.Request(url)
    # The site rejects requests without a plausible Referer and browser UA.
    req.add_header('Referer', 'http://' + referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    match = re.search('"file": "(.*?)"', link)
    print ("ZZZZzzzz", link)
    if match:
        # Append the fixed rtmp player parameters expected downstream.
        link = urllib.unquote(match.group( 1)) + ' pageUrl=http://aliez.tv/live/mlb/ swfUrl=http://player.longtailvideo.com/player.swf app=aliezlive-live live=true tcUrl=rtmp://play.aliez.com/aliezlive-live'
        return link
    else:
        return False
Example #4
Source File: addon.py From plugin.video.xunleicloud with GNU General Public License v2.0 | 6 votes |
def cloudspace():
    ''' show xunlei cloud space content '''
    # Request the "history play list" (up to 200 entries, ordered by creation
    # time); `cachetime` in the query busts intermediate caches.
    dhurl = '%s/%s/?type=all&order=create&t=%s' % (
        cloudurlpre, 'req_history_play_list/req_num/200/req_offset/0', cachetime)
    rsp = xl.urlopen(dhurl)
    vods = json.loads(rsp)['resp']['history_play_list']
    # One menu entry per record that carries a source URL; file names arrive
    # percent-encoded, so decode them for display.
    menu = [
        {'label': urllib2.unquote(v['file_name'].encode('utf-8')),
         'path': plugin.url_for('playcloudvideo',
                                vinfo=str((v['src_url'], v['gcid'], v['cid'], v['file_name'])))}
        for v in vods if 'src_url' in v]
    return menu
Example #5
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def parseraliez(self, url, referer,options): req = urllib2.Request(url) req.add_header('Referer', referer) req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0') response = urllib2.urlopen(req) link = response.read() response.close() match = re.search('"file":(.*?)"(.*?)"', link) print ("ZZZZzzzz", match, link) print match.group(2) if match: link = urllib.unquote(match.group( 2)) + ' pageUrl=http://aliez.tv/live/mlb/ swfUrl=http://player.longtailvideo.com/player.swf app=aliezlive-live live=true tcUrl=rtmp://play.aliez.com/aliezlive-live' return link else: return False
Example #6
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def parserputlive(self, url, referer,options):
    """Resolve a putlive embed: fetch the page, percent-decode the
    html(unescape("...")) payload and assemble
    "<streamer><file> pageUrl=<src> swfUrl=<src>".

    Returns None implicitly when the unescape payload is absent.
    """
    print ("a", url, referer,options)
    req = urllib2.Request(url)
    req.add_header('Referer', 'http://' + referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    print ("Link", link)
    # The player markup is injected via document html(unescape("...")).
    match = re.compile('html\(unescape\("(.*?)"\)\);').findall(link)
    if len(match) > 0:
        print urllib.unquote(match[0])
        # src / streamer / file are pulled from the decoded snippet.
        match1 = re.compile('src="(.*?)"').findall(urllib.unquote(match[0]))
        match2 = re.compile('streamer=(.*?)&').findall(urllib.unquote(match[0]))
        match3 = re.compile('file=(.*?)&').findall(urllib.unquote(match[0]))
        print ("Link", match1)
        print ("Link", match2)
        print ("Link", match3)
        return match2[0] + match3[0] + ' pageUrl=' + match1[0] + ' swfUrl=' + match1[0]

#parsertopupload
Example #7
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def parserVIDZER(self,url,referer, options):
    """Resolve a vidzer.net link.

    First tries the direct "getlink" href; otherwise replays the site's
    confirmation form (confirm/fuck_you fields) and extracts the player URL.
    Returns the media URL, or '' when resolution fails.
    """
    query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
    link = self.cm.getURLRequestData(query_data)
    match = re.search('href="(http[^"]+?getlink[^"]+?)"', link)
    if match:
        url = urllib.unquote( match.group(1) )
        return url
    r = re.search('value="(.+?)" name="fuck_you"', link)
    r2 = re.search('name="confirm" type="submit" value="(.+?)"', link)
    r3 = re.search('<a href="/file/([^"]+?)" target', link)
    # BUG FIX: the branch used r2.group(1) and r3.group(1) while only checking
    # `r`; a page missing either field crashed with AttributeError. All three
    # matches are now required before replaying the form.
    if r and r2 and r3:
        query_data = { 'url': 'http://www.vidzer.net/e/'+r3.group(1)+'?w=631&h=425', 'use_host': False, 'use_cookie': False, 'use_post': True, 'return_data': True }
        postdata = {'confirm' : r2.group(1), 'fuck_you' : r.group(1)}
        link = self.cm.getURLRequestData(query_data, postdata)
        match = re.search("url: '([^']+?)'", link)
        if match:
            url = match.group(1)  # + '|Referer=http://www.vidzer.net/media/flowplayer/flowplayer.commercial-3.2.18.swf'
            return url
        else:
            return ''
    else:
        return ''
Example #8
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def myviru(self,url,referer,options):
    """Resolve a myvi.ru video: fetch the embed page (saving cookies), follow
    the dataUrl API link, then pull the first video URL from the payload and
    attach the UniversalUserID cookie required for playback.

    Returns '<url>|Cookie=<cookie>' on success, '' otherwise.
    """
    COOKIEFILE = self.cookieFileName('myviru')
    query_data = { 'url': url, 'use_host': False, 'use_header': False, 'use_cookie': True, 'cookiefile': COOKIEFILE, 'save_cookie': True, 'use_post': False, 'return_data': True }
    link = self.cm.getURLRequestData(query_data)
    self.log.info('aaa %s' % link)
    linkvideo = ''
    match2= re.compile("dataUrl:'(.*?)',").findall(link)
    if len(match2)>0:
        mylink = 'http://myvi.ru'+urllib2.unquote(match2[0])
        query_data = { 'url': mylink, 'use_host': False, 'use_header': False, 'use_cookie': True, 'cookiefile': COOKIEFILE, 'save_cookie': True, 'load_cookie': True, 'use_post': False, 'return_data': True }
        result = self.cm.getURLRequestData(query_data)
        # Normalise the payload (unescape, fix slashes/newlines/quotes, drop
        # spaces) so the regex below can find the video list.
        result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
        match3= re.compile('"video":\[{"url":"([^"]+)"}\]').findall(result)
        if len(match3)>0:
            self.log.info('aaa %s' % match3)
            mycook = self.cm.getCookieItem(COOKIEFILE,'UniversalUserID')
            mycook = urllib.urlencode({'UniversalUserID':mycook})
            self.log.info('aaa %s' % mycook)
            return '%s|Cookie=%s' % (match3[0], mycook)
    return linkvideo
Example #9
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def parserputlive(self, url, referer,options):
    """Resolve a putlive embed (duplicate of the other parserputlive in this
    file): decode the html(unescape("...")) payload and assemble
    "<streamer><file> pageUrl=<src> swfUrl=<src>".
    """
    print ("a", url, referer,options)
    req = urllib2.Request(url)
    req.add_header('Referer', 'http://' + referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    print ("Link", link)
    match = re.compile('html\(unescape\("(.*?)"\)\);').findall(link)
    if len(match) > 0:
        print urllib.unquote(match[0])
        # src / streamer / file are extracted from the decoded snippet.
        match1 = re.compile('src="(.*?)"').findall(urllib.unquote(match[0]))
        match2 = re.compile('streamer=(.*?)&').findall(urllib.unquote(match[0]))
        match3 = re.compile('file=(.*?)&').findall(urllib.unquote(match[0]))
        print ("Link", match1)
        print ("Link", match2)
        print ("Link", match3)
        return match2[0] + match3[0] + ' pageUrl=' + match1[0] + ' swfUrl=' + match1[0]

#parsertopupload
Example #10
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def parseraliez(self, url, referer,options): req = urllib2.Request(url) req.add_header('Referer', referer) req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0') response = urllib2.urlopen(req) link = response.read() response.close() match = re.search('"file":(.*?)"(.*?)"', link) print ("ZZZZzzzz", match, link) print match.group(2) if match: link = urllib.unquote(match.group( 2)) + ' pageUrl=http://aliez.tv/live/mlb/ swfUrl=http://player.longtailvideo.com/player.swf app=aliezlive-live live=true tcUrl=rtmp://play.aliez.com/aliezlive-live' return link else: return False
Example #11
Source File: vuittv.py From tvalacarta with GNU General Public License v3.0 | 6 votes |
def get_video_url(page_url, premium = False, user="", password="", video_password="", page_data=""):
    """Resolve the final media URL for a vuittv page: follow the filesusr
    iframe, pull "sourceURL" out of the player data and percent-decode it.

    Returns a list of [extension, url] pairs (single entry).
    """
    logger.info("tvalacarta.servers.vuittv get_video_url page_url="+page_url)
    data = scrapertools.cache_page(page_url)
    url2 = scrapertools.find_single_match(data,'<iframe width="[^"]+" height="[^"]+" scrolling="[^"]+" data-src="(http://www-arucitys-com.filesusr.com[^"]+)"')
    logger.info("url2="+url2)
    data = scrapertools.cache_page(url2)
    media_url = scrapertools.find_single_match(data,'"sourceURL"\:"([^"]+)"')
    logger.info("media_url="+media_url)
    media_url = urllib2.unquote(media_url)
    logger.info("media_url="+media_url)
    video_urls = []
    # The "extension" is the last 4 characters of the file name in the URL.
    video_urls.append([ scrapertools.get_filename_from_url(media_url)[-4:], media_url ])
    return video_urls

# Finds this server's videos in the given text
Example #12
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def parseucaster(self, url, referer,options):
    """Resolve a ucaster stream (duplicate of the other parseucaster in this
    file): fetch *url* with spoofed headers and build an rtmp command line.

    Returns the rtmp string on success, False when no file entry is found.
    """
    print ("a", url, referer,options)
    req = urllib2.Request(url)
    req.add_header('Referer', 'http://' + referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    match = re.search('"file": "(.*?)"', link)
    print ("ZZZZzzzz", link)
    if match:
        # Append the fixed rtmp player parameters expected downstream.
        link = urllib.unquote(match.group( 1)) + ' pageUrl=http://aliez.tv/live/mlb/ swfUrl=http://player.longtailvideo.com/player.swf app=aliezlive-live live=true tcUrl=rtmp://play.aliez.com/aliezlive-live'
        return link
    else:
        return False
Example #13
Source File: gsearch.py From Google-Alfred3-Workflow with MIT License | 6 votes |
def handle_starttag(self, tag, attrs):
    """Track <h3 class="r">/<a>/<b> nesting flags and record the result link.

    Once inside an anchor under a result heading, extract the target from the
    href: Google-wrapped "/url?q=..." links are percent-decoded, everything
    else is stored verbatim in self.link.
    """
    if tag == 'h3' and attrs == [('class', 'r')]:
        self.h3_flag = True
    if tag == 'a' and self.h3_flag:
        self.a_flag = True
    if tag == 'b' and self.a_flag:
        self.b_flag = True
    if not self.a_flag:
        return
    for name, value in attrs:
        if name != 'href':
            continue
        if value.startswith("/url?"):
            # Google wraps results as /url?q=<encoded target>&...
            redirect = match('/url\?(url|q)=(.+?)&', value)
            if redirect and len(redirect.groups()) == 2:
                self.link = urllib2.unquote(redirect.group(2))
        else:
            self.link = value
Example #14
Source File: googlevideo.py From filmkodi with Apache License 2.0 | 6 votes |
def _parse_gdocs(self, html):
    """Parse the Google Docs player config out of *html*.

    Scans for '["key","value"]' pairs, finds the fmt_stream_map entry (a
    comma-separated list of "itag|url" items) and returns a list of
    (quality_label, url) tuples. Returns [] when no map is present.
    """
    urls = []
    for match in re.finditer('\[\s*"([^"]+)"\s*,\s*"([^"]+)"\s*\]', html):
        key, value = match.groups()
        if key == 'fmt_stream_map':
            items = value.split(',')
            for item in items:
                _source_itag, source_url = item.split('|')
                # Py2: normalise unicode to UTF-8 bytes before the
                # unicode_escape round-trip below.
                if isinstance(source_url, unicode):
                    source_url = source_url.encode('utf-8')
                source_url = source_url.decode('unicode_escape')
                # Map the itag to a readable quality label when known.
                quality = self.itag_map.get(_source_itag, 'Unknown Quality [%s]' % _source_itag)
                source_url = urllib2.unquote(source_url)
                urls.append((quality, source_url))
            return urls
    return urls
Example #15
Source File: favorites.py From plugin.video.vrt.nu with GNU General Public License v3.0 | 6 votes |
def manage(self):
    """Allow the user to unselect favorites to be removed from the listing"""
    from utils import url_to_program
    self.refresh(ttl=0)
    if not self._data:
        ok_dialog(heading=localize(30418), message=localize(30419))  # No favorites found
        return

    def by_title(item):
        """Sort by title"""
        return item.get('value').get('title')

    # One dict per favorite: program id, display title, current follow state.
    items = [dict(program=url_to_program(value.get('value').get('programUrl')),
                  title=unquote(value.get('value').get('title')),
                  enabled=value.get('value').get('isFavorite'))
             for value in list(sorted(list(self._data.values()), key=by_title))]
    titles = [item['title'] for item in items]
    # BUG FIX: range(0, len(items) - 1) skipped the last favorite, so an
    # enabled final entry was never preselected; iterate over ALL indices.
    preselect = [idx for idx in range(len(items)) if items[idx]['enabled']]
    selected = multiselect(localize(30420), options=titles, preselect=preselect)  # Please select/unselect to follow/unfollow
    if selected is not None:
        # Unfollow everything deselected, follow everything newly selected.
        for idx in set(preselect).difference(set(selected)):
            self.unfollow(program=items[idx]['program'], title=items[idx]['title'])
        for idx in set(selected).difference(set(preselect)):
            self.follow(program=items[idx]['program'], title=items[idx]['title'])
Example #16
Source File: utils_log.py From motu-client-python with GNU Lesser General Public License v3.0 | 6 votes |
def log_url(log, message, url, level = logging.DEBUG ):
    """Nicely log *url*: the base part first, then each query parameter on its
    own line, all percent-decoded.

    log:     the log into which to write the message
    message: a message printed before the url
    url:     the url to log
    level:   (optional) the log level to use
    """
    parts = url.split('?')
    log.log(level, message + unquote(parts[0]))
    if len(parts) > 1:
        # Parameters are logged sorted by name; a bare name gets an empty value.
        for pair in sorted(parts[1].split('&')):
            fields = pair.split('=')
            if len(fields) < 2:
                fields.append('')
            log.log(level, ' . %s = %s', unquote(fields[0]), unquote(fields[1]))
Example #17
Source File: tokenresolver.py From plugin.video.vrt.nu with GNU General Public License v3.0 | 6 votes |
def _get_xvrttoken(self, login_json=None):
    """Get a one year valid X-VRT-Token"""
    from json import dumps
    if not login_json:
        login_json = self._get_login_json()
    login_token = login_json.get('sessionInfo', {}).get('login_token')
    if not login_token:
        return None
    # The Gigya login token travels as a glt_<api key> cookie.
    login_cookie = 'glt_{api_key}={token}'.format(api_key=self._API_KEY, token=login_token)
    payload = dict(
        uid=login_json.get('UID'),
        uidsig=login_json.get('UIDSignature'),
        ts=login_json.get('signatureTimestamp'),
        email=from_unicode(get_setting('username'))
    )
    data = dumps(payload).encode()
    headers = {'Content-Type': 'application/json', 'Cookie': login_cookie}
    log(2, 'URL post: {url}', url=unquote(self._TOKEN_GATEWAY_URL))
    req = Request(self._TOKEN_GATEWAY_URL, data=data, headers=headers)
    # The gateway answers with a Set-Cookie header carrying the X-VRT-Token.
    setcookie_header = urlopen(req).info().get('Set-Cookie')
    xvrttoken = TokenResolver._create_token_dictionary(setcookie_header)
    if xvrttoken is None:
        return None
    notification(message=localize(30952))  # Login succeeded.
    return xvrttoken
Example #18
Source File: tests.py From django-wham with MIT License | 6 votes |
def build_httmock_functions(mock_response_dir):
    """Build one httmock handler per canned-response file in the directory.

    Each file name is a percent-encoded URL, optionally prefixed with the
    HTTP method it answers (e.g. "GEThttp%3A//...").
    """
    print 'building mock functions'
    functions = []
    for filename in listdir(mock_response_dir):
        filepath = join(mock_response_dir,filename)
        if isfile(filepath):
            method = None
            # Strip a leading method prefix, remembering which one matched.
            for _method in ('GET', 'POST', 'PUT', 'DELETE', 'PATCH'):
                if filename.startswith(_method):
                    filename = filename[len(_method):]
                    method = _method
            # The remainder of the file name is the percent-encoded URL.
            url = urllib2.unquote(filename)
            parts = urlparse(url)
            params = {}
            if parts.query:
                print parts.query
                params = dict(parse_qsl(parts.query))
                print params
            # The file body is the mock response content.
            with open(filepath) as f:
                content = f.read()
            functions.append(build_httmock_function(
                parts.scheme, parts.netloc, parts.path, content, params=params, method=method))
    return functions
Example #19
Source File: vvvvid.py From plugin.video.vvvvid with GNU General Public License v2.0 | 6 votes |
def dec_ei(h):
    """Decode vvvvid's scrambled string *h*.

    Maps each character through the custom base64-like alphabet, runs a
    backwards XOR mixing pass over the code list, post-processes it via f(),
    then renders the values as %XX escapes and percent-decodes the result.
    """
    alphabet = 'MNOPIJKL89+/4567UVWXQRSTEFGHABCDcdefYZabstuvopqr0123wxyzklmnghij'
    codes = [alphabet.find(ch) for ch in h]
    n = len(codes)
    # Two full passes backwards: each slot is XOR-ed with its right neighbour
    # (wrapping), matching the JS obfuscator's inverse.
    for i in range(2 * n - 1, -1, -1):
        codes[i % n] = codes[i % n] ^ codes[(i + 1) % n]
    codes = f(codes)
    # Render each value as a two-hex-digit percent escape, then unquote.
    escaped = ''.join('%' + ('0' + format(v, 'x'))[-2:] for v in codes)
    return urllib2.unquote(escaped)
Example #20
Source File: googlevideo.py From filmkodi with Apache License 2.0 | 6 votes |
def __extract_video(self, item):
    """Walk the deeply nested Google video metadata structure and collect
    (quality_label, url) pairs from any string containing url=...&itag=...
    fragments. Returns the first non-empty list found, else [].
    """
    sources = []
    for e in item:
        if isinstance(e, dict):
            for key in e:
                for item2 in e[key]:
                    if isinstance(item2, list):
                        for item3 in item2:
                            if isinstance(item3, list):
                                for item4 in item3:
                                    # Py2: normalise unicode to UTF-8 bytes,
                                    # then undo escaping before matching.
                                    if isinstance(item4, unicode):
                                        item4 = item4.encode('utf-8')
                                    if isinstance(item4, basestring):
                                        item4 = urllib2.unquote(item4).decode('unicode_escape')
                                        for match in re.finditer('url=(?P<link>[^&]+).*?&itag=(?P<itag>[^&]+)', item4):
                                            link = match.group('link')
                                            itag = match.group('itag')
                                            quality = self.itag_map.get(itag, 'Unknown Quality [%s]' % itag)
                                            sources.append((quality, link))
                                        if sources:
                                            return sources
    return sources
Example #21
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 6 votes |
def myviru(self,url,referer,options):
    """Resolve a myvi.ru video (variant: cookie file lives under the addon
    path and the second request only loads, not re-saves, the cookie jar).

    Returns '<url>|Cookie=<cookie>' on success, '' otherwise.
    """
    COOKIEFILE = ptv.getAddonInfo('path') + os.path.sep + "cookies" + os.path.sep + "myviru.cookie"
    query_data = { 'url': url, 'use_host': False, 'use_header': False, 'use_cookie': True, 'cookiefile': COOKIEFILE, 'save_cookie': True, 'use_post': False, 'return_data': True }
    link = self.cm.getURLRequestData(query_data)
    linkvideo = ''
    match2= re.compile("dataUrl:'(.*?)',").findall(link)
    if len(match2)>0:
        mylink = 'http://myvi.ru'+urllib2.unquote(match2[0])
        query_data = { 'url': mylink, 'use_host': False, 'use_header': False, 'use_cookie': True, 'cookiefile': COOKIEFILE, 'load_cookie': True, 'use_post': False, 'return_data': True }
        result = self.cm.getURLRequestData(query_data)
        # Normalise the payload so the regex below can find the video list.
        result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
        match3= re.compile('"video":\[{"url":"([^"]+)"}\]').findall(result)
        if len(match3)>0:
            # Playback requires the UniversalUserID cookie captured above.
            mycook = self.cm.getCookieItem(COOKIEFILE,'UniversalUserID')
            mycook = urllib.urlencode({'UniversalUserID':mycook})
            return '%s|Cookie=%s' % (match3[0], mycook)
    return linkvideo
Example #22
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 5 votes |
def parserSAWLIVE(self, url, referer,options):
    """Resolve a sawlive.tv embed into an rtmp command line."""
    def decode(tmpurl):
        # Hex-encode the host name of tmpurl; sawlive expects this token
        # appended to the iframe URL.
        host = self.getHostName(tmpurl)
        result = ''
        for i in host:
            result += hex(ord(i)).split('x')[1]
        return result
    query = urlparse.urlparse(url)
    channel = query.path
    channel = channel.replace("/embed/", "")
    print("chanel",channel)
    query_data = {'url': 'http://www.sawlive.tv/embed/' + channel, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True, 'header' : {'Referer': referer, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0'}}
    link21 = self.cm.getURLRequestData(query_data)
    # Unpack the p,a,c,k,e,d obfuscated script (last occurrence on the page).
    match = re.compile('eval\(function\(p,a,c,k,e,d\)(.*?)split\(\'\|\'\),0,{}\)\)').findall(link21)
    txtjs = "eval(function(p,a,c,k,e,d)" + match[-1] +"split('|'),0,{}))"
    link2 = beautify(txtjs)
    match21 = re.compile("var escapa = unescape\('(.*?)'\);").findall(link21)
    # Extract the iframe src from the escaped snippet and append the
    # hex-encoded referer host.
    start = urllib.unquote(match21[0]).find('src="')
    end = len(urllib.unquote(match21[0]))
    url = urllib.unquote(match21[0])[start + 5:end] + '/' + decode(referer)
    query_data = {'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
    link22 = self.cm.getURLRequestData(query_data)
    # Pull the SWF player, playpath and streamer out of the iframe document.
    match22 = re.compile("SWFObject\('(.*?)','mpl','100%','100%','9'\);").findall(link22)
    match23 = re.compile("so.addVariable\('file', '(.*?)'\);").findall(link22)
    match24 = re.compile("so.addVariable\('streamer', '(.*?)'\);").findall(link22)
    print ("Match", match22, match23, match24, link22)
    videolink = match24[0] + ' playpath=' + match23[0] + ' swfUrl=' + match22[ 0] + ' pageUrl=http://sawlive.tv/embed/' + channel + ' live=true swfVfy=true'
    return videolink
Example #23
Source File: ecostream.py From filmkodi with Apache License 2.0 | 5 votes |
def get_media_url(self, host, media_id):
    """Resolve an ecostream.tv stream.

    Scrapes the page and ecoss.js for the POST endpoint and the scattered
    form-token parts, replays the "Start Stream" XHR, then follows the
    returned link redirects to the final media URL.

    Raises ResolverError at each stage where the expected markup is missing.
    """
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url).content
    if re.search('>File not found!<', html):
        raise ResolverError('File Not Found or removed')
    web_url = 'http://www.ecostream.tv/js/ecoss.js'
    js = self.net.http_GET(web_url).content
    r = re.search("\$\.post\('([^']+)'[^;]+'#auth'\).html\(''\)", js)
    if not r:
        raise ResolverError('Posturl not found')
    post_url = r.group(1)
    r = re.search('data\("tpm",([^\)]+)\);', js)
    if not r:
        raise ResolverError('Postparameterparts not found')
    # The "tpm" token is concatenated from several JS variables whose values
    # are embedded in the page HTML; collect them in order.
    post_param_parts = r.group(1).split('+')
    found_parts = []
    for part in post_param_parts:
        pattern = "%s='([^']+)'" % part.strip()
        r = re.search(pattern, html)
        if not r:
            raise ResolverError('Formvaluepart not found')
        found_parts.append(r.group(1))
    tpm = ''.join(found_parts)
    # emulate click on button "Start Stream"
    headers = ({'Referer': web_url, 'X-Requested-With': 'XMLHttpRequest', 'User-Agent': common.IE_USER_AGENT})
    web_url = 'http://www.ecostream.tv' + post_url
    html = self.net.http_POST(web_url, {'id': media_id, 'tpm': tpm}, headers=headers).content
    sPattern = '"url":"([^"]+)"'
    r = re.search(sPattern, html)
    if not r:
        raise ResolverError('Unable to resolve Ecostream link. Filelink not found.')
    stream_url = 'http://www.ecostream.tv' + r.group(1)
    stream_url = urllib2.unquote(stream_url)
    # Follow redirects to obtain the final media location.
    stream_url = urllib2.urlopen(urllib2.Request(stream_url, headers=headers)).geturl()
    return stream_url + helpers.append_headers({'User-Agent': common.IE_USER_AGENT})
Example #24
Source File: urlfetch.py From python-compat-runtime with Apache License 2.0 | 5 votes |
def _is_fetching_self(url, method):
    """Checks if the fetch is for the same URL from which it originated.

    Args:
      url: str; the URL being fetched.
      method: Value from `_VALID_METHODS`.

    Returns:
      Boolean indicating whether or not it seems that the app is trying to
      fetch itself.
    """
    # Only GETs to the app's own host/path count as self-fetches; without the
    # CGI environment we cannot tell, so assume not.
    if (method != GET or
            "HTTP_HOST" not in os.environ or
            "PATH_INFO" not in os.environ):
        return False
    _, host_port, path, _, _ = urlparse.urlsplit(url)
    if host_port == os.environ['HTTP_HOST']:
        # Compare percent-decoded paths; '' and '/' are treated as equivalent
        # (both mean the root resource).
        current_path = urllib2.unquote(os.environ['PATH_INFO'])
        desired_path = urllib2.unquote(path)
        if (current_path == desired_path or
                (current_path in ('', '/') and desired_path in ('', '/'))):
            return True
    return False
Example #25
Source File: domino.py From python-domino with Apache License 2.0 | 5 votes |
def parse_play_flash_cookie(response):
    """Parse the Play framework PLAY_FLASH cookie from *response*.

    Returns a dict with `messageType`, a user-friendly `message`, and an
    `error` flag (True when the type is "dominoFlashError").
    """
    flash_cookie = response.cookies['PLAY_FLASH']
    # BUG FIX: split on the FIRST '=' only -- the message itself may contain
    # '=' characters, which made the two-value unpacking raise ValueError.
    messageType, message = flash_cookie.split("=", 1)
    # Format message into user friendly string
    message = urllib2.unquote(message).replace("+", " ")
    # Discern error disposition
    if (messageType == "dominoFlashError"):
        error = True
    else:
        error = False
    return dict(messageType=messageType, message=message, error=error)
Example #26
Source File: mrknow_urlparser.py From filmkodi with Apache License 2.0 | 5 votes |
def parserSAWLIVE(self, url, referer,options):
    """Resolve a sawlive.tv embed into an rtmp command line (duplicate of the
    other parserSAWLIVE in this file).
    """
    def decode(tmpurl):
        # Hex-encode the host name of tmpurl; appended to the iframe URL.
        host = self.getHostName(tmpurl)
        result = ''
        for i in host:
            result += hex(ord(i)).split('x')[1]
        return result
    query = urlparse.urlparse(url)
    channel = query.path
    channel = channel.replace("/embed/", "")
    print("chanel",channel)
    query_data = {'url': 'http://www.sawlive.tv/embed/' + channel, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True, 'header' : {'Referer': referer, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0'}}
    link21 = self.cm.getURLRequestData(query_data)
    # Unpack the p,a,c,k,e,d obfuscated script (last occurrence on the page).
    match = re.compile('eval\(function\(p,a,c,k,e,d\)(.*?)split\(\'\|\'\),0,{}\)\)').findall(link21)
    txtjs = "eval(function(p,a,c,k,e,d)" + match[-1] +"split('|'),0,{}))"
    link2 = beautify(txtjs)
    match21 = re.compile("var escapa = unescape\('(.*?)'\);").findall(link21)
    # Extract the iframe src from the escaped snippet and append the
    # hex-encoded referer host.
    start = urllib.unquote(match21[0]).find('src="')
    end = len(urllib.unquote(match21[0]))
    url = urllib.unquote(match21[0])[start + 5:end] + '/' + decode(referer)
    query_data = {'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
    link22 = self.cm.getURLRequestData(query_data)
    # Pull the SWF player, playpath and streamer out of the iframe document.
    match22 = re.compile("SWFObject\('(.*?)','mpl','100%','100%','9'\);").findall(link22)
    match23 = re.compile("so.addVariable\('file', '(.*?)'\);").findall(link22)
    match24 = re.compile("so.addVariable\('streamer', '(.*?)'\);").findall(link22)
    print ("Match", match22, match23, match24, link22)
    videolink = match24[0] + ' playpath=' + match23[0] + ' swfUrl=' + match22[ 0] + ' pageUrl=http://sawlive.tv/embed/' + channel + ' live=true swfVfy=true'
    return videolink
Example #27
Source File: utils.py From nautilus-git with GNU General Public License v3.0 | 5 votes |
def get_file_path(uri):
    """Return the local filesystem path for a file:// URI, or None for any
    other scheme."""
    parsed = urlsplit(uri)
    if parsed.scheme.lower() != "file":
        return None
    # The path component is percent-encoded in the URI.
    return unquote(parsed.path)
Example #28
Source File: tokenresolver.py From plugin.video.vrt.nu with GNU General Public License v3.0 | 5 votes |
def _get_usertoken(self, name=None, login_json=None):
    """Get a user X-VRT-Token, vrtlogin-at, vrtlogin-expiry, vrtlogin-rt, SESSION, OIDCXSRF or state token"""
    if not login_json:
        login_json = self._get_login_json()
    cookiejar = cookielib.CookieJar()
    opener = build_opener(HTTPCookieProcessor(cookiejar), ProxyHandler(self._proxies))
    log(2, 'URL get: {url}', url=unquote(self._USER_TOKEN_GATEWAY_URL))
    # First hit the gateway to obtain the CSRF cookie needed for the login POST.
    opener.open(self._USER_TOKEN_GATEWAY_URL)
    xsrf = next((cookie for cookie in cookiejar if cookie.name == 'OIDCXSRF'), None)
    if xsrf is None:
        return None
    payload = dict(
        UID=login_json.get('UID'),
        UIDSignature=login_json.get('UIDSignature'),
        signatureTimestamp=login_json.get('signatureTimestamp'),
        client_id='vrtnu-site',
        _csrf=xsrf.value
    )
    data = urlencode(payload).encode()
    log(2, 'URL post: {url}', url=unquote(self._VRT_LOGIN_URL))
    # The login POST populates the cookie jar with the various session tokens.
    opener.open(self._VRT_LOGIN_URL, data=data)
    # Cache additional tokens for later use
    refreshtoken = TokenResolver._create_token_dictionary(cookiejar, cookie_name='vrtlogin-rt')
    accesstoken = TokenResolver._create_token_dictionary(cookiejar, cookie_name='vrtlogin-at')
    if refreshtoken is not None:
        from json import dumps
        cache_file = self._get_token_filename('vrtlogin-rt')
        update_cache(cache_file, dumps(refreshtoken), self._TOKEN_CACHE_DIR)
    if accesstoken is not None:
        from json import dumps
        cache_file = self._get_token_filename('vrtlogin-at')
        update_cache(cache_file, dumps(accesstoken), self._TOKEN_CACHE_DIR)
    # Finally return the token that was actually asked for (by cookie name).
    return TokenResolver._create_token_dictionary(cookiejar, name)
Example #29
Source File: tokenresolver.py From plugin.video.vrt.nu with GNU General Public License v3.0 | 5 votes |
def _get_roaming_xvrttoken(self):
    """Get a X-VRT-Token for roaming"""
    vrtlogin_at = self.get_token('vrtlogin-at')
    if vrtlogin_at is None:
        return None
    cookie_value = 'vrtlogin-at=' + vrtlogin_at
    headers = {'Cookie': cookie_value}
    # Redirects are followed manually (NoRedirection) so the intermediate
    # Set-Cookie/Location headers can be captured along the way.
    opener = build_opener(NoRedirection, ProxyHandler(self._proxies))
    log(2, 'URL get: {url}', url=unquote(self._ROAMING_TOKEN_GATEWAY_URL))
    req = Request(self._ROAMING_TOKEN_GATEWAY_URL, headers=headers)
    req_info = opener.open(req).info()
    # Pick the `state` cookie out of the first response and keep it for the
    # final request.
    cookie_value += '; state=' + req_info.get('Set-Cookie').split('state=')[1].split('; ')[0]
    url = req_info.get('Location')
    log(2, 'URL get: {url}', url=unquote(url))
    # Second hop: follow the Location chain one more step.
    url = opener.open(url).info().get('Location')
    headers = {'Cookie': cookie_value}
    if url is None:
        return None
    req = Request(url, headers=headers)
    log(2, 'URL get: {url}', url=unquote(url))
    # The final response sets the roaming X-VRT-Token cookie.
    setcookie_header = opener.open(req).info().get('Set-Cookie')
    return TokenResolver._create_token_dictionary(setcookie_header)
Example #30
Source File: download.py From pi_romulus with GNU General Public License v2.0 | 5 votes |
def __init__(self, *args, **kwargs):
    """Set up the downloader: no URL selected yet, plus the directory helper
    and scraper used to locate and place ROM files.
    """
    # NOTE(review): *args/**kwargs are accepted but not forwarded to the
    # parent constructor; confirm the base class expects no arguments.
    super(Download, self).__init__()
    self.url = None
    self.dirs_obj = Directories()
    self.search = Scraper()
    # A large commented-out download() implementation that used to trail this
    # constructor was dead code and has been removed; recover it from version
    # control history if needed.