Python urllib.unquote_plus() Examples
The following are 29 code examples of urllib.unquote_plus(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the urllib module, or try the search function.
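As a quick orientation before the examples: in Python 2, unquote_plus() lives in the urllib module, while in Python 3 it moved to urllib.parse. The sketch below is illustrative only and uses made-up strings, not code from any of the projects listed here.

import urllib  # Python 2

# '+' is decoded to a space and %-escapes are decoded.
print urllib.unquote_plus('query=hello+world%21')   # query=hello world!

# unquote() differs only in that it leaves '+' untouched.
print urllib.unquote('query=hello+world%21')        # query=hello+world!

# Python 3 equivalent (for reference):
#   from urllib.parse import unquote_plus
#   unquote_plus('query=hello+world%21')  ->  'query=hello world!'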
Example #1
Source File: test_urllib.py From oss-ftp with MIT License | 6 votes |
def test_unquoting(self):
    # Make sure unquoting of all ASCII values works
    escape_list = []
    for num in range(128):
        given = hexescape(chr(num))
        expect = chr(num)
        result = urllib.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %s != %s" % (expect, result))
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))
        escape_list.append(given)
    escape_string = ''.join(escape_list)
    del escape_list
    result = urllib.unquote(escape_string)
    self.assertEqual(result.count('%'), 1,
                     "using quote(): not all characters escaped; %s" % result)
    result = urllib.unquote(escape_string)
    self.assertEqual(result.count('%'), 1,
                     "using unquote(): not all characters escaped: "
                     "%s" % result)
Example #2
Source File: fields.py From n6 with GNU Affero General Public License v3.0 | 6 votes |
def _stubbornly_unquote(self, value):
    # Note: we can assume that the value has been unquoted (from
    # %-encoding) by the Pyramid stuff, but the following stubborn
    # unquoting is added for cases when data have been quoted by
    # the client "too many times"; we try to be "liberal in what we
    # accept" because, indeed, it is quite easy to get lost in all
    # this encoding stuff :-).  But, on the other hand, we would
    # not like to allow for any ambiguities, so we accept *only*
    # URL-safe-Base64-encoding, not standard-Base64-encoding (as
    # the latter involves '+' whose meaning would not be clear:
    # it could be interpreted as a plus sign or as a space which,
    # then, could be interpreted just as an "ignorable filler"...).
    # Note, therefore, that it becomes *not* crucial whether we use
    # `urllib.unquote()` or `urllib.unquote_plus()` here -- because
    # URL-safe-Base64-encoding does *not* allow plus signs (and we
    # also *forbid* spaces, even as "ignorable fillers").
    for _ in xrange(10):
        # ^ limited number of steps because we do not like allowing
        # API clients to make us go into an infinite loop... :-]
        value = urllib.unquote_plus(value)
        if '%' not in value and '+' not in value:
            break
    return value
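Example #2 loops because a client may have percent-encoded a value more than once. A minimal sketch of that repeated unquoting, using a hypothetical doubly-quoted value rather than real n6 data:

import urllib

value = 'dGVzdA%253D%253D'          # hypothetical: 'dGVzdA==' quoted twice by a client
value = urllib.unquote_plus(value)  # 'dGVzdA%3D%3D' -- still contains '%'
value = urllib.unquote_plus(value)  # 'dGVzdA=='     -- no '%' or '+' left, so the loop above would stop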
Example #3
Source File: main_natgeo.py From plugin.video.ustvvod with GNU General Public License v2.0 | 6 votes |
def add_videos(episode_tree, SITE):
    episodes = []
    episode_menu = episode_tree.find_all('div', class_ = 'media-module')
    show_name = episode_tree.find('h1').text
    for episode_item in episode_menu:
        episode_name = episode_item.a['data-title']
        episode_thumb = urllib.unquote_plus(episode_item.a.img['data-src'].split('url=')[1])
        try:
            episode_duration = common.format_seconds(episode_item.find('div', class_='timestamp').text.strip())
        except:
            episode_duration = -1
        url = episode_item.a['href']
        u = sys.argv[0]
        u += '?url="' + urllib.quote_plus(url) + '"'
        u += '&mode="' + SITE + '"'
        u += '&sitemode="play_video"'
        infoLabels = { 'title' : episode_name, 'durationinseconds' : episode_duration, 'TVShowTitle' : show_name }
        episodes.append((u, episode_name, episode_thumb, infoLabels, 'list_qualities', False, 'Full Episode'))
    return episodes
Example #4
Source File: urls.py From plugin.video.kmediatorrent with GNU General Public License v3.0 | 6 votes |
def match(self, path):
    '''Attempts to match a url to the given path. If successful, a tuple
    is returned. The first item is the matchd function and the second
    item is a dictionary containing items to be passed to the function
    parsed from the provided path.

    If the provided path does not match this url rule then a
    NotFoundException is raised.
    '''
    m = self._regex.search(path)
    if not m:
        raise NotFoundException

    # urlunencode the values
    items = dict((key, unquote_plus(val)) for key, val in m.groupdict().items())

    # unpickle any items if present
    items = unpickle_dict(items)

    # We need to update our dictionary with default values provided in
    # options if the keys don't already exist.
    [items.setdefault(key, val) for key, val in self._options.items()]
    return self._view_func, items
Example #5
Source File: entities.py From plugin.video.kmediatorrent with GNU General Public License v3.0 | 6 votes |
def extract_from_utmz(self, utmz):
    parts = utmz.split('.', 4)
    if len(parts) != 5:
        raise ValueError('The given "__utmz" cookie value is invalid.')

    self.creation_time = utils.convert_ga_timestamp(parts[1])
    self.response_count = int(parts[3])

    params = parts[4].split(Campaign.CAMPAIGN_DELIMITER)
    for param in params:
        key, val = param.split('=')
        try:
            setattr(self, self.UTMZ_PARAM_MAP[key], unquote_plus(val))
        except KeyError:
            continue

    return self
Example #6
Source File: proxy.py From plugin.video.ustvvod with GNU General Public License v2.0 | 6 votes |
def serveProxy(self, path, data):
    realpath = urllib.unquote_plus(path)[6:]
    proxyconfig = realpath.split('/')[-1]
    proxy_object = simplejson.loads(proxyconfig)
    if int(proxy_object['connectiontype']) == 1:
        proxies = proxy_object['dns_proxy']
        MyHTTPHandler._dnsproxy = proxies
        handler = MyHTTPHandler
    elif int(proxy_object['connectiontype']) == 2:
        proxy = proxy_object['proxy']
        us_proxy = 'http://' + proxy['us_proxy'] + ':' + proxy['us_proxy_port']
        proxy_handler = urllib2.ProxyHandler({'http' : us_proxy})
        handler = proxy_handler
    realpath = realpath.replace('/' + proxyconfig, '')
    fURL = base64.b64decode(realpath)
    self.serveFile(fURL, data, handler)
Example #7
Source File: uri_parser.py From satori with Apache License 2.0 | 6 votes |
def _parse_options(opts, delim):
    """Helper method for split_options which creates the options dict.
    Also handles the creation of a list for the URI tag_sets/
    readpreferencetags portion."""
    options = {}
    for opt in opts.split(delim):
        key, val = opt.split("=")
        if key.lower() == 'readpreferencetags':
            options.setdefault('readpreferencetags', []).append(val)
        else:
            # str(option) to ensure that a unicode URI results in plain 'str'
            # option names. 'normalized' is then suitable to be passed as
            # kwargs in all Python versions.
            if str(key) in options:
                warnings.warn("Duplicate URI option %s" % (str(key),))
            options[str(key)] = unquote_plus(val)

    # Special case for deprecated options
    if "wtimeout" in options:
        if "wtimeoutMS" in options:
            options.pop("wtimeout")
        warnings.warn("Option wtimeout is deprecated, use 'wtimeoutMS'"
                      " instead")

    return options
Example #8
Source File: uri_parser.py From satori with Apache License 2.0 | 6 votes |
def parse_userinfo(userinfo):
    """Validates the format of user information in a MongoDB URI.
    Reserved characters like ':', '/', '+' and '@' must be escaped
    following RFC 2396.

    Returns a 2-tuple containing the unescaped username followed
    by the unescaped password.

    :Paramaters:
        - `userinfo`: A string of the form <username>:<password>

    .. versionchanged:: 2.2
       Now uses `urllib.unquote_plus` so `+` characters must be escaped.
    """
    if '@' in userinfo or userinfo.count(':') > 1:
        raise InvalidURI("':' or '@' characters in a username or password "
                         "must be escaped according to RFC 2396.")
    user, _, passwd = _partition(userinfo, ":")
    # No password is expected with GSSAPI authentication.
    if not user:
        raise InvalidURI("The empty string is not valid username.")
    user = unquote_plus(user)
    passwd = unquote_plus(passwd)

    return user, passwd
Example #9
Source File: pan.baidu.com.py From iScript with MIT License | 6 votes |
def do4(self, paths):
    for path in paths:
        r = ss.get(path, allow_redirects=False)
        t = re.search(r'fin=(.+?)(&|$)', r.headers['location']).group(1)
        name = urllib.unquote_plus(t)
        self.infos = {
            'name': name,
            'file': os.path.join(os.getcwd(), name),
            'dir_': os.getcwd(),
            'dlink': fast_pcs_server(path)
        }
        if args.play:
            panbaiducom_HOME._play_do(self.infos)
        else:
            panbaiducom_HOME._download_do(self.infos)
        break
Example #10
Source File: 115.py From iScript with MIT License | 6 votes |
def do(self, pc):
    dlink = self.get_dlink(pc)
    name = re.search(r'/([^/]+?)\?', dlink).group(1)
    name = urllib.unquote_plus(name)
    t = os.path.join(os.getcwd(), name)
    infos = {
        'file': t,
        'dir_': os.path.split(t)[0],
        'dlink': dlink,
        #'purl': self._get_play_purl(pc) \
            # if args.play and self.is_vip else None,
        'purl': self._get_play_purl(pc) if args.play else None,
        'name': name,
        'nn': 1,
        'total_file': 1
    }
    self.download(infos)
Example #11
Source File: uri_parser.py From vnpy_crypto with MIT License | 6 votes |
def _parse_options(opts, delim):
    """Helper method for split_options which creates the options dict.
    Also handles the creation of a list for the URI tag_sets/
    readpreferencetags portion."""
    options = {}
    for opt in opts.split(delim):
        key, val = opt.split("=")
        if key.lower() == 'readpreferencetags':
            options.setdefault('readpreferencetags', []).append(val)
        else:
            # str(option) to ensure that a unicode URI results in plain 'str'
            # option names. 'normalized' is then suitable to be passed as
            # kwargs in all Python versions.
            if str(key) in options:
                warnings.warn("Duplicate URI option %s" % (str(key),))
            options[str(key)] = unquote_plus(val)

    # Special case for deprecated options
    if "wtimeout" in options:
        if "wtimeoutMS" in options:
            options.pop("wtimeout")
        warnings.warn("Option wtimeout is deprecated, use 'wtimeoutMS'"
                      " instead")

    return options
Example #12
Source File: pycompletion.py From PyDev.Debugger with Eclipse Public License 1.0 | 6 votes |
def GetImports(module_name):
    try:
        processor = pycompletionserver.Processor()
        data = urllib.unquote_plus(module_name)
        def_file, completions = _pydev_imports_tipper.GenerateTip(data)
        return processor.formatCompletionMessage(def_file, completions)
    except:
        s = StringIO.StringIO()
        exc_info = sys.exc_info()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], limit=None, file=s)
        err = s.getvalue()
        pycompletionserver.dbg('Received error: ' + str(err), pycompletionserver.ERROR)
        raise

#=======================================================================================================================
# main
#=======================================================================================================================
Example #13
Source File: test_urllib.py From ironpython2 with Apache License 2.0 | 6 votes |
def test_unquoting(self):
    # Make sure unquoting of all ASCII values works
    escape_list = []
    for num in range(128):
        given = hexescape(chr(num))
        expect = chr(num)
        result = urllib.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %s != %s" % (expect, result))
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))
        escape_list.append(given)
    escape_string = ''.join(escape_list)
    del escape_list
    result = urllib.unquote(escape_string)
    self.assertEqual(result.count('%'), 1,
                     "using quote(): not all characters escaped; %s" % result)
    result = urllib.unquote(escape_string)
    self.assertEqual(result.count('%'), 1,
                     "using unquote(): not all characters escaped: "
                     "%s" % result)
Example #14
Source File: pydevd_referrers.py From PyDev.Debugger with Eclipse Public License 1.0 | 6 votes |
def print_var_node(xml_node, stream):
    name = xml_node.getAttribute('name')
    value = xml_node.getAttribute('value')
    val_type = xml_node.getAttribute('type')

    found_as = xml_node.getAttribute('found_as')
    stream.write('Name: ')
    stream.write(unquote_plus(name))
    stream.write(', Value: ')
    stream.write(unquote_plus(value))
    stream.write(', Type: ')
    stream.write(unquote_plus(val_type))

    if found_as:
        stream.write(', Found as: %s' % (unquote_plus(found_as),))

    stream.write('\n')

#===================================================================================================
# print_referrers
#===================================================================================================
Example #15
Source File: test_urllib.py From BinderFilter with MIT License | 6 votes |
def test_unquoting(self):
    # Make sure unquoting of all ASCII values works
    escape_list = []
    for num in range(128):
        given = hexescape(chr(num))
        expect = chr(num)
        result = urllib.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %s != %s" % (expect, result))
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))
        escape_list.append(given)
    escape_string = ''.join(escape_list)
    del escape_list
    result = urllib.unquote(escape_string)
    self.assertEqual(result.count('%'), 1,
                     "using quote(): not all characters escaped; %s" % result)
    result = urllib.unquote(escape_string)
    self.assertEqual(result.count('%'), 1,
                     "using unquote(): not all characters escaped: "
                     "%s" % result)
Example #16
Source File: escape.py From teleport with Apache License 2.0 | 6 votes |
def url_unescape(value, encoding='utf-8', plus=True):
    """Decodes the given value from a URL.

    The argument may be either a byte or unicode string.

    If encoding is None, the result will be a byte string.  Otherwise,
    the result is a unicode string in the specified encoding.

    If ``plus`` is true (the default), plus signs will be interpreted
    as spaces (literal plus signs must be represented as "%2B").  This
    is appropriate for query strings and form-encoded values but not
    for the path component of a URL.  Note that this default is the
    reverse of Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
    if encoding is None:
        return unquote(utf8(value))
    else:
        return unicode_type(unquote(utf8(value)), encoding)
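The plus argument above simply selects between unquote_plus and unquote. A rough usage sketch of the function shown in Example #16, with a made-up input string:

url_unescape('a+b%2Fc', plus=True)    # u'a b/c'  -- query-string / form-encoded style
url_unescape('a+b%2Fc', plus=False)   # u'a+b/c'  -- path-component style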
Example #17
Source File: default.py From tdw with GNU General Public License v3.0 | 6 votes |
def play_url(params):
    torr_link=params['file']
    img=urllib.unquote_plus(params["img"])
    #showMessage('heading', torr_link, 10000)
    TSplayer=tsengine()
    out=TSplayer.load_torrent(torr_link,'TORRENT',port=aceport)
    if out=='Ok':
        for k,v in TSplayer.files.iteritems():
            li = xbmcgui.ListItem(urllib.unquote(k))
            uri = construct_request({
                'torr_url': torr_link,
                'title': k,
                'ind':v,
                'img':img,
                'mode': 'play_url2'
            })
            xbmcplugin.addDirectoryItem(handle, uri, li, False)
        xbmcplugin.addSortMethod(handle, xbmcplugin.SORT_METHOD_LABEL)
        xbmcplugin.endOfDirectory(handle)
    TSplayer.end()
Example #18
Source File: unzip.py From aws-lambda-unzip-py with MIT License | 5 votes |
def lambda_handler(event, context):
    key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))
    try:
        obj = s3.get_object(Bucket=bucket, Key=key)
        putObjects = []
        with io.BytesIO(obj["Body"].read()) as tf:
            # rewind the file
            tf.seek(0)
            # Read the file as a zipfile and process the members
            with zipfile.ZipFile(tf, mode='r') as zipf:
                for file in zipf.infolist():
                    fileName = file.filename
                    putFile = s3.put_object(Bucket=bucket, Key=fileName, Body=zipf.read(file))
                    putObjects.append(putFile)
                    print(putFile)
        # Delete zip file after unzip
        if len(putObjects) > 0:
            deletedObj = s3.delete_object(Bucket=bucket, Key=key)
            print('deleted file:')
            print(deletedObj)
    except Exception as e:
        print(e)
        print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))
        raise e
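Example #18 runs the S3 object key through unquote_plus() because object keys in S3 event notifications arrive URL-encoded (a space in the original key shows up as '+', for example). A small illustration with a made-up key, not taken from the project:

import urllib

raw_key = 'incoming/my+archive%202019.zip'   # hypothetical key as it might appear in an S3 event record
print urllib.unquote_plus(raw_key)           # incoming/my archive 2019.zip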
Example #19
Source File: pencil.py From quiver with Apache License 2.0 | 5 votes |
def url_unescape(string):
    if string is None:
        return

    return _url_unescape(string)
Example #20
Source File: yunpan.360.cn.py From iScript with MIT License | 5 votes |
def get_path(self, url):
    url = urllib.unquote_plus(url)
    f = re.search(r'#(.+?)(&|$)', url)
    if f:
        return f.group(1)
    else:
        return '/'
Example #21
Source File: debugger_unittest.py From PyDev.Debugger with Eclipse Public License 1.0 | 5 votes |
def wait_for_message(self, accept_message, unquote_msg=True, expect_xml=True, timeout=None):
    if isinstance(accept_message, (str, int)):
        msg_starts_with = '%s\t' % (accept_message,)

        def accept_message(msg):
            return msg.startswith(msg_starts_with)

    import untangle
    from io import StringIO
    prev = None
    while True:
        last = self.get_next_message('wait_for_message', timeout=timeout)
        if unquote_msg:
            last = unquote_plus(unquote_plus(last))
        if accept_message(last):
            if expect_xml:
                # Extract xml and return untangled.
                xml = ''
                try:
                    xml = last[last.index('<xml>'):]
                    if isinstance(xml, bytes):
                        xml = xml.decode('utf-8')
                    xml = untangle.parse(StringIO(xml))
                except:
                    traceback.print_exc()
                    raise AssertionError('Unable to parse:\n%s\nxml:\n%s' % (last, xml))
                ret = xml.xml
                ret.original_xml = last
                return ret
            else:
                return last

        if prev != last:
            print('Ignored message: %r' % (last,))

        prev = last
Example #22
Source File: debugger_unittest.py From PyDev.Debugger with Eclipse Public License 1.0 | 5 votes |
def _is_var_in_last(self, expected, last):
    if expected in last:
        return True

    last = unquote_plus(last)
    if expected in last:
        return True

    # We actually quote 2 times on the backend...
    last = unquote_plus(last)
    if expected in last:
        return True

    return False
Example #23
Source File: debugger_unittest.py From PyDev.Debugger with Eclipse Public License 1.0 | 5 votes |
def get_next_message(self, context_message, timeout=None):
    if timeout is None:
        timeout = self.MESSAGES_TIMEOUT
    try:
        msg = self._queue.get(block=True, timeout=timeout)
    except:
        raise TimeoutError('No message was written in %s seconds. Error message:\n%s' % (timeout, context_message,))
    else:
        frame = sys._getframe().f_back.f_back
        frame_info = ''
        while frame:
            if not frame.f_code.co_name.startswith('test_'):
                frame = frame.f_back
                continue

            if frame.f_code.co_filename.endswith('debugger_unittest.py'):
                frame = frame.f_back
                continue

            stack_msg = ' -- File "%s", line %s, in %s\n' % (frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name)
            if 'run' == frame.f_code.co_name:
                frame_info = stack_msg  # Ok, found the writer thread 'run' method (show only that).
                break

            frame_info += stack_msg
            frame = frame.f_back
            # Just print the first which is not debugger_unittest.py
            break

        frame = None
        sys.stdout.write('Message returned in get_next_message(): %s -- ctx: %s, asked at:\n%s\n' % (
            unquote_plus(unquote_plus(msg)), context_message, frame_info))

    if not self.accept_xml_messages:
        if '<xml' in msg:
            raise AssertionError('Xml messages disabled. Received: %s' % (msg,))
    return msg
Example #24
Source File: tutv.py From tvalacarta with GNU General Public License v3.0 | 5 votes |
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[tutv.py] get_video_url(page_url='%s')" % page_url)

    # Look for the ID in the URL
    id = extract_id(page_url)

    # If it is not there, extract it from the page
    if id=="":
        # Download the page
        data = scrapertools.cache_page(page_url)
        patron = '<link rel="video_src" href="([^"]+)"/>'
        matches = re.compile(patron,re.DOTALL).findall(data)
        if len(matches)>0:
            id = extract_id(matches[0])
        else:
            id = ""

    # Download the descriptor
    url = "http://tu.tv/visualizacionExterna2.php?web=undefined&codVideo="+id
    data = scrapertools.cache_page(url)

    # Get the link to the video
    patronvideos = 'urlVideo0=([^\&]+)\&'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)

    url = urllib.unquote_plus( matches[0] )
    video_urls = [ ["[tu.tv]",url] ]

    for video_url in video_urls:
        logger.info("[tutv.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example #25
Source File: favoritos.py From tvalacarta with GNU General Public License v3.0 | 5 votes |
def deletebookmark(fullfilename,deletepath=BOOKMARK_PATH):
    logger.info("tvalacarta.core.favoritos deletebookmark(fullfilename="+fullfilename+",deletepath="+deletepath+")")

    if not usingsamba(deletepath):
        os.remove( os.path.join( urllib.unquote_plus( deletepath ) , urllib.unquote_plus( fullfilename )))
    else:
        fullfilename = fullfilename.replace("\\","/")
        partes = fullfilename.split("/")
        filename = partes[len(partes)-1]
        logger.info("tvalacarta.core.favoritos filename="+filename)
        logger.info("tvalacarta.core.favoritos deletepath="+deletepath)
        samba.remove_file(filename,deletepath)
Example #26
Source File: pydevd_comm.py From PyDev.Debugger with Eclipse Public License 1.0 | 5 votes |
def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
    self.sequence = seq
    self.thread_id = thread_id
    self.frame_id = frame_id
    self.scope = scope
    self.attrs = attrs
    self.style = style
    self.code_or_file = unquote_plus(encoded_code_or_file)
    self.fnname = fnname
Example #27
Source File: test_urllib.py From oss-ftp with MIT License | 5 votes |
def test_unquoting_plus(self):
    # Test difference between unquote() and unquote_plus()
    given = "are+there+spaces..."
    expect = given
    result = urllib.unquote(given)
    self.assertEqual(expect, result,
                     "using unquote(): %s != %s" % (expect, result))
    expect = given.replace('+', ' ')
    result = urllib.unquote_plus(given)
    self.assertEqual(expect, result,
                     "using unquote_plus(): %s != %s" % (expect, result))
Example #28
Source File: test_urllib.py From oss-ftp with MIT License | 5 votes |
def test_unquoting_parts(self):
    # Make sure unquoting works when have non-quoted characters
    # interspersed
    given = 'ab%sd' % hexescape('c')
    expect = "abcd"
    result = urllib.unquote(given)
    self.assertEqual(expect, result,
                     "using quote(): %s != %s" % (expect, result))
    result = urllib.unquote_plus(given)
    self.assertEqual(expect, result,
                     "using unquote_plus(): %s != %s" % (expect, result))
Example #29
Source File: plugintools.py From tvalacarta with GNU General Public License v3.0 | 5 votes |
def get_params():
    _log("get_params")

    param_string = sys.argv[2]
    _log("get_params "+str(param_string))

    commands = {}

    if param_string:
        split_commands = param_string[param_string.find('?') + 1:].split('&')

        for command in split_commands:
            _log("get_params command="+str(command))
            if len(command) > 0:
                if "=" in command:
                    split_command = command.split('=')
                    key = split_command[0]
                    value = urllib.unquote_plus(split_command[1])
                    commands[key] = value
                else:
                    commands[command] = ""

    _log("get_params "+repr(commands))
    return commands

# Fetch text content from an URL