Python six.moves.urllib.parse.unquote_plus() Examples
The following are 28 code examples of six.moves.urllib.parse.unquote_plus(), drawn from open-source projects. The header above each example names the original project, source file, and license. You may also want to check out all available functions and classes of the six.moves.urllib.parse module.
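For orientation: unquote_plus() decodes percent-escapes and, unlike unquote(), also converts '+' to a space, which is the convention used for HTML form data and URL query strings. A minimal standalone illustration:

from six.moves.urllib.parse import unquote, unquote_plus

print(unquote_plus('hello+world%21'))  # hello world!  ('+' becomes a space)
print(unquote('hello+world%21'))       # hello+world!  ('+' is left alone)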
Example #1
Source File: url.py From pipenv with MIT License

def _parse_query(self):
    # type: () -> URI
    query = self.query if self.query is not None else ""
    query_dict = omdict()
    queries = query.split("&")
    query_items = []
    subdirectory = self.subdirectory if self.subdirectory else None
    for q in queries:
        key, _, val = q.partition("=")
        val = unquote_plus(val)
        if key == "subdirectory" and not subdirectory:
            subdirectory = val
        else:
            query_items.append((key, val))
    query_dict.load(query_items)
    return attr.evolve(
        self, query_dict=query_dict, subdirectory=subdirectory, query=query
    )
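The pattern above (split on '&', partition each pair on '=', then unquote_plus() the value) is easy to try in isolation. A minimal sketch of the same idea without the omdict/attr machinery, using a made-up query string:

from six.moves.urllib.parse import unquote_plus

query = "subdirectory=pkg%2Fsubdir&label=hello+world"  # hypothetical input
query_items = []
subdirectory = None
for q in query.split("&"):
    key, _, val = q.partition("=")
    val = unquote_plus(val)  # '%2F' -> '/', '+' -> ' '
    if key == "subdirectory" and not subdirectory:
        subdirectory = val
    else:
        query_items.append((key, val))
print(subdirectory)   # pkg/subdir
print(query_items)    # [('label', 'hello world')]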
Example #2
Source File: codec.py From pwnypack with MIT License

def deurlquote(d, plus=False):
    """
    Decode a percent encoded string.

    Args:
        d(str): The percent encoded value to decode.
        plus(bool): Parse a plus symbol as a space.

    Returns:
        str: The decoded version of the percent-encoded value ``d``.

    Example:
        >>> from pwny import *
        >>> deurlquote('Foo+Bar/Baz')
        'Foo Bar/Baz'
    """
    return unquote_plus(d) if plus else unquote(d)
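One caveat worth noting: the doctest in the docstring above only matches the implementation when plus=True is passed, since the default path calls unquote() and leaves '+' intact. A quick check of both paths:

from six.moves.urllib.parse import unquote, unquote_plus

d = 'Foo+Bar%2FBaz'
print(unquote(d))        # Foo+Bar/Baz   (deurlquote(d), the default)
print(unquote_plus(d))   # Foo Bar/Baz   (deurlquote(d, plus=True))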
Example #3
Source File: mongo.py From integrations-core with BSD 3-Clause "New" or "Revised" License

def _parse_uri(cls, server, sanitize_username=False):
    """
    Parses a MongoDB-formatted URI (e.g. mongodb://user:pass@server/db) and
    returns parsed elements and a sanitized URI.
    """
    parsed = pymongo.uri_parser.parse_uri(server)
    username = parsed.get('username')
    password = parsed.get('password')
    db_name = parsed.get('database')
    nodelist = parsed.get('nodelist')
    auth_source = parsed.get('options', {}).get('authsource')

    # Remove password (and optionally username) from sanitized server URI.
    # To ensure that the `replace` works well, we first need to url-decode the raw server string
    # since the password parsed by pymongo is url-decoded
    decoded_server = unquote_plus(server)
    clean_server_name = decoded_server.replace(password, "*" * 5) if password else decoded_server

    if sanitize_username and username:
        username_pattern = u"{}[@:]".format(re.escape(username))
        clean_server_name = re.sub(username_pattern, "", clean_server_name)

    return username, password, db_name, nodelist, clean_server_name, auth_source
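The unquote_plus() call here matters because pymongo reports the password already url-decoded, so replacing it in the raw URI would silently fail whenever the password contains percent-escapes. A small sketch with a made-up URI:

from six.moves.urllib.parse import unquote_plus

server = 'mongodb://user:p%40ssw0rd@localhost:27017/admin'  # hypothetical URI
password = 'p@ssw0rd'  # what pymongo's uri_parser reports: already url-decoded

print(server.replace(password, '*' * 5))                # no match, URI unchanged
print(unquote_plus(server).replace(password, '*' * 5))  # mongodb://user:*****@localhost:27017/admin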
Example #4
Source File: cas.py From sgx-kms with Apache License 2.0

def get_all(self, external_project_id, **kw):
    LOG.debug('Start certificate_authorities on_get')

    plugin_name = kw.get('plugin_name')
    if plugin_name is not None:
        plugin_name = parse.unquote_plus(plugin_name)

    plugin_ca_id = kw.get('plugin_ca_id', None)
    if plugin_ca_id is not None:
        plugin_ca_id = parse.unquote_plus(plugin_ca_id)

    # refresh CA table, in case plugin entries have expired
    cert_resources.refresh_certificate_resources()

    project_model = res.get_or_create_project(external_project_id)

    cas, offset, limit, total = self._get_subcas_and_root_cas(
        offset=kw.get('offset', 0),
        limit=kw.get('limit', None),
        plugin_name=plugin_name,
        plugin_ca_id=plugin_ca_id,
        project_id=project_model.id)

    return self._display_cas(cas, offset, limit, total)
Example #5
Source File: url.py From requirementslib with MIT License

def _parse_query(self):
    # type: () -> URI
    query = self.query if self.query is not None else ""
    query_dict = omdict()
    queries = query.split("&")
    query_items = []
    subdirectory = self.subdirectory if self.subdirectory else None
    for q in queries:
        key, _, val = q.partition("=")
        val = unquote_plus(val)
        if key == "subdirectory" and not subdirectory:
            subdirectory = val
        else:
            query_items.append((key, val))
    query_dict.load(query_items)
    return attr.evolve(
        self, query_dict=query_dict, subdirectory=subdirectory, query=query
    )
Example #6
Source File: test_tasks_single_scrapyd.py From scrapydweb with GNU General Public License v3.0

def test_task_xhr_delete_a_task_with_job(app, client):
    task_id = metadata['task_id']
    # 'schedule.check' in test_check_with_task()
    text, __ = req_single_scrapyd(app, client, view='schedule.run', kws=dict(node=NODE),
                                  data=dict(filename=FILENAME_DV), location=metadata['location'])
    new_task_id = int(re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text)).group(1))
    assert new_task_id - task_id == 1
    sleep()  # Wait until the first execution finishes

    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=new_task_id))
    check_dumped_task_data(js, version=cst.DEFAULT_LATEST_VERSION)

    __, js = req_single_scrapyd(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list'))
    assert new_task_id in js['ids']

    tip = "apscheduler_job #{id} removed. Task #{id} deleted".format(id=new_task_id)
    req_single_scrapyd(app, client, view='tasks.xhr',
                       kws=dict(node=NODE, action='delete', task_id=new_task_id),
                       jskws=dict(status=cst.OK, tip=tip))
    __, js = req_single_scrapyd(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list'))
    assert new_task_id not in js['ids']

    message = "apscheduler_job #{id} not found. Task #{id} not found. ".format(id=new_task_id)
    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=new_task_id),
                                jskws=dict(status=cst.ERROR, message=message))
    assert js['data'] is None
Example #7
Source File: test_tasks.py From scrapydweb with GNU General Public License v3.0

def test_run_with_task(app, client):
    # ScrapydWeb_demo.egg: custom_settings = {}, also output specific settings & arguments in the log
    upload_file_deploy(app, client, filename='ScrapydWeb_demo_no_request.egg', project=cst.PROJECT,
                       redirect_project=cst.PROJECT)

    req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='enable'),
        ins='STATE_RUNNING', nos='STATE_PAUSED')
    with app.test_request_context():
        text, __ = req(app, client, view='schedule.run', kws=dict(node=NODE), data=run_data,
                       location=url_for('tasks', node=NODE))
        m = re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text))
        task_id = int(m.group(1))
        print("task_id: %s" % task_id)
        metadata['task_id'] = task_id

    __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='dump', task_id=task_id))
    assert js['data']['selected_nodes'] == [1, 2]
Example #8
Source File: test_spark.py From integrations-core with BSD 3-Clause "New" or "Revised" License

def __init__(self, url):
    parts = urlparse(url)
    _query = frozenset(parse_qsl(parts.query))
    _path = unquote_plus(parts.path)
    parts = parts._replace(query=_query, path=_path)
    self.parts = parts
Example #9
Source File: test_external.py From oslo.policy with Apache License 2.0

def decode_post_data(self, post_data):
    result = {}
    for item in post_data.split('&'):
        key, _sep, value = item.partition('=')
        result[key] = jsonutils.loads(urlparse.unquote_plus(value))

    return result
Example #10
Source File: test_external.py From oslo.policy with Apache License 2.0

def decode_post_data(self, post_data):
    result = {}
    for item in post_data.split('&'):
        key, _sep, value = item.partition('=')
        result[key] = jsonutils.loads(urlparse.unquote_plus(value))

    return result
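Both listings above are the same helper: it inverts a POST body whose values were JSON-serialized and then url-encoded. A self-contained round trip under that assumption, using the stdlib json module in place of oslo's jsonutils:

import json
from six.moves.urllib.parse import quote_plus, unquote_plus

# Build a form-encoded body the way the tests presumably do (hypothetical payload).
post_data = '&'.join('%s=%s' % (k, quote_plus(json.dumps(v)))
                     for k, v in [('target', 'vm1'), ('count', 3)])

result = {}
for item in post_data.split('&'):
    key, _sep, value = item.partition('=')
    result[key] = json.loads(unquote_plus(value))
print(result)  # {'target': 'vm1', 'count': 3}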
Example #11
Source File: url.py From requirementslib with MIT License

def _parse_fragment(self):
    # type: () -> URI
    subdirectory = self.subdirectory if self.subdirectory else ""
    fragment = self.fragment if self.fragment else ""
    if self.fragment is None:
        return self
    fragments = self.fragment.split("&")
    fragment_items = {}
    name = self.name if self.name else ""
    extras = self.extras
    for q in fragments:
        key, _, val = q.partition("=")
        val = unquote_plus(val)
        fragment_items[key] = val
        if key == "egg":
            from .utils import parse_extras

            name, stripped_extras = pip_shims.shims._strip_extras(val)
            if stripped_extras:
                extras = tuple(parse_extras(stripped_extras))
        elif key == "subdirectory":
            subdirectory = val
    return attr.evolve(
        self,
        fragment_dict=fragment_items,
        subdirectory=subdirectory,
        fragment=fragment,
        extras=extras,
        name=name,
    )
Example #12
Source File: transportkeys.py From barbican with Apache License 2.0

def on_get(self, external_project_id, **kw):
    LOG.debug('Start transport_keys on_get')

    plugin_name = kw.get('plugin_name', None)
    if plugin_name is not None:
        plugin_name = parse.unquote_plus(plugin_name)

    result = self.repo.get_by_create_date(
        plugin_name=plugin_name,
        offset_arg=kw.get('offset', 0),
        limit_arg=kw.get('limit', None),
        suppress_exception=True
    )

    transport_keys, offset, limit, total = result

    if not transport_keys:
        transport_keys_resp_overall = {'transport_keys': [],
                                       'total': total}
    else:
        transport_keys_resp = [
            hrefs.convert_transport_key_to_href(s.id)
            for s in transport_keys
        ]
        transport_keys_resp_overall = hrefs.add_nav_hrefs(
            'transport_keys', offset, limit, total,
            {'transport_keys': transport_keys_resp}
        )
        transport_keys_resp_overall.update({'total': total})

    return transport_keys_resp_overall
Example #13
Source File: test_tasks.py From scrapydweb with GNU General Public License v3.0

def test_execute_task_exception(app, client):
    check_data_ = dict(check_data)
    check_data_.update(action='add')
    req(app, client, view='schedule.check', kws=dict(node=NODE), data=check_data_,
        jskws=dict(cmd="-d _version=%s" % cst.VERSION, filename=FILENAME))
    with app.test_request_context():
        text, __ = req(app, client, view='schedule.run', kws=dict(node=NODE), data=run_data,
                       location=url_for('tasks', node=NODE))
        m = re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text))
        task_id = int(m.group(1))
        print("task_id: %s" % task_id)
    __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='dump', task_id=task_id))
    assert js['data']['selected_nodes'] == [1, 2]

    # req_single_scrapyd would set single_scrapyd=True
    req_single_scrapyd(app, client, view='tasks.xhr', kws=dict(node=1, action='fire', task_id=task_id))
    sleep()
    req(app, client, view='tasks', kws=dict(node=1),
        ins=["id: %s," % task_id, "prev_run_result: 'FAIL 1, PASS 1',",
             "fail_times: 1,", "run_times: 'FAIL 1 / 1',"])
    text, __ = req(app, client, view='tasks', kws=dict(node=1, task_id=task_id),
                   ins=["fail_count: 1,", "pass_count: 1,", ":total='1'"])
    with app.test_request_context():
        url_delete = url_for('tasks.xhr', node=1, action='delete', task_id=task_id)
    # in the task results page: url_action: '/1/tasks/xhr/delete/5/10/',
    task_result_id = int(re.search(r'%s(\d+)/' % url_delete, text).group(1))
    print("task_result_id: %s" % task_result_id)
    # In baseview.py: assert 0 < self.node <= self.SCRAPYD_SERVERS_AMOUNT
    # Note that AssertionError would be raised directly in test, whereas internal_server_error() would return 500.html
    # instead when the app is actually running, getting '500 error node index error: 2, which should be between 1 and 1'
    req(app, client, view='tasks', kws=dict(node=1, task_id=task_id, task_result_id=task_result_id),
        ins=["node: 1,", "server: '%s'," % app.config['SCRAPYD_SERVERS'][0],
             "status_code: 200,", "status: 'ok',",
             "node: 2,", "status_code: -1,", "status: 'exception',", "node index error",
             ":total='2'"])
    req(app, client, view='tasks.xhr', kws=dict(node=1, action='delete', task_id=task_id))
Example #14
Source File: test_tasks_single_scrapyd.py From scrapydweb with GNU General Public License v3.0

def test_execute_task_fail(app, client):
    data = dict(DATA)
    # set_to_second
    req_single_scrapyd(app, client, view='schedule.check', kws=dict(node=NODE), data=data,
                       set_to_second=True,
                       jskws=dict(cmd="-d _version=%s" % cst.VERSION, filename=FILENAME))
    text, __ = req_single_scrapyd(app, client, view='schedule.run', kws=dict(node=NODE),
                                  data=dict(filename=FILENAME), location=metadata['location'])
    task_id = int(re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text)).group(1))
    print("task_id: %s" % task_id)
    # For compatibility with postgresql
    metadata['task_id'] = task_id

    sleep(2)  # The first execution has not finished yet
    req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE),
                       ins=["id: %s," % task_id, "prev_run_result: 'FAIL 0, PASS 0',"])
    text, __ = req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE, task_id=task_id),
                                  ins=["fail_count: 0,", "pass_count: 0,", ":total='1'"])
    # in the task results page: url_action: '/1/tasks/xhr/delete/5/10/',
    with app.test_request_context():
        url_delete = url_for('tasks.xhr', node=NODE, action='delete', task_id=task_id)
    task_result_id = int(re.search(r'%s(\d+)/' % url_delete, text).group(1))
    print("task_result_id: %s" % task_result_id)

    sleep(28)
    req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE),
                       ins=["id: %s," % task_id, "prev_run_result: 'FAIL 1, PASS 0',"])
    req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE, task_id=task_id),
                       ins=["status_code: -1,", "status: 'error',", "Max retries exceeded",
                            ":total='1'"],
                       nos="node: %s," % NODE)
    req_single_scrapyd(app, client, view='tasks',
                       kws=dict(node=NODE, task_id=task_id, task_result_id=task_result_id),
                       ins=["node: %s," % NODE, "status_code: -1,", "status: 'error',",
                            "Max retries exceeded", ":total='1'"])
    req_single_scrapyd(app, client, view='tasks.xhr',
                       kws=dict(node=NODE, action='delete', task_id=task_id))
Example #15
Source File: test_tasks_single_scrapyd.py From scrapydweb with GNU General Public License v3.0

def test_add_task_with_default_values(app, client):
    data = dict(DATA)
    for k in data.keys():
        if k in ['jitter', 'misfire_grace_time', 'max_instances']:
            data[k] = 'invalid int'
        elif k not in ['project', '_version', 'spider', 'trigger']:
            # if not request.form.get('trigger'): return
            data[k] = ''
    req_single_scrapyd(app, client, view='schedule.check', kws=dict(node=NODE), data=data,
                       jskws=dict(cmd="-d _version=%s" % cst.VERSION, filename=FILENAME))
    text, __ = req_single_scrapyd(app, client, view='schedule.run', kws=dict(node=NODE),
                                  data=dict(filename=FILENAME), location=metadata['location'])
    sleep()
    task_id = int(re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text)).group(1))
    print("task_id: %s" % task_id)

    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=task_id))
    assert js['data']['settings_arguments'] == {'setting': []}
    assert js['data']['selected_nodes'] == [1]
    assert js['data']['timezone'] is None
    assert js['data']['apscheduler_job']['misfire_grace_time'] == 600
    assert js['data']['apscheduler_job']['coalesce'] is True
    assert js['data']['apscheduler_job']['max_instances'] == 1
    assert js['data']['apscheduler_job']['name'] == 'task_%s' % task_id
    assert ':00:00' in js['data']['apscheduler_job']['next_run_time']
    for k, v in js['data']['apscheduler_job']['trigger'].items():
        if k in ['start_date', 'end_date']:
            assert v is None
        elif k in ['minute', 'second']:
            assert v == '0'
        elif k == 'jitter':
            assert v == 0
        elif k == 'timezone':
            assert v == str(get_localzone())
        else:
            assert v == '*'
    req_single_scrapyd(app, client, view='tasks.xhr',
                       kws=dict(node=NODE, action='delete', task_id=task_id))
Example #16
Source File: test_tasks_single_scrapyd.py From scrapydweb with GNU General Public License v3.0

def test_add_task_action_add(app, client):
    data_dv = dict(DATA_DV)
    data_dv.update(action='add', hour='*', minute='*', second='*')
    req_single_scrapyd(app, client, view='schedule.check', kws=dict(node=NODE), data=data_dv,
                       jskws=dict(filename=FILENAME_DV))
    text, __ = req_single_scrapyd(app, client, view='schedule.run', kws=dict(node=NODE),
                                  data=dict(filename=FILENAME_DV), location=metadata['location'])
    m = re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text))
    task_id = int(m.group(1))
    print("task_id: %s" % task_id)
    next_run_time = m.group(2)
    print("next_run_time: %s" % next_run_time)
    assert next_run_time == "2036-12-31 00:00:00+08:00"

    __, js = req_single_scrapyd(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list'))
    assert task_id in js['ids']
    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=task_id))
    assert js['data']['apscheduler_job']['next_run_time'] == next_run_time

    with app.test_request_context():
        url_pause = url_for('tasks.xhr', node=NODE, action='pause', task_id=task_id)
        url_resume = url_for('tasks.xhr', node=NODE, action='resume', task_id=task_id)
    req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE),
                       ins=[url_pause, "prev_run_result: 'N/A',",
                            "next_run_time: '%s'," % next_run_time,
                            "fail_times: 0,", "run_times: 0,"],
                       nos=url_resume)
    req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE, task_id=task_id),
                       ins=[TITLE_DV, 'label="Pass count"', ":total='0'"])
    req_single_scrapyd(app, client, view='tasks',
                       kws=dict(node=NODE, task_id=task_id, task_result_id=cst.BIGINT),
                       ins=[TITLE_DV, 'label="Server"', ":total='0'"])
    req_single_scrapyd(app, client, view='tasks.xhr',
                       kws=dict(node=NODE, action='delete', task_id=task_id))

# check POST "action": "add_pause"
Example #17
Source File: test_tasks_single_scrapyd.py From scrapydweb with GNU General Public License v3.0

def test_edit_to_new_a_task(app, client):
    day_of_week = '*'
    task_id = metadata['task_id']
    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=task_id))
    create_time = js['data']['create_time']
    update_time = js['data']['update_time']

    data = dict(DATA)
    data.update(task_id=task_id, replace_existing='False', day_of_week=day_of_week)
    req_single_scrapyd(app, client, view='schedule.check', kws=dict(node=NODE), data=data,
                       jskws=dict(cmd="-d _version=%s" % cst.VERSION, filename=FILENAME))
    text, __ = req_single_scrapyd(app, client, view='schedule.run', kws=dict(node=NODE),
                                  data=dict(filename=FILENAME), location=metadata['location'])
    new_task_id = int(re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text)).group(1))
    print("new_task_id: %s" % new_task_id)
    # assert new_task_id == task_id + 1
    # For compatibility with postgresql, though test_task_xhr_delete_a_task_with_job is executed before
    # https://stackoverflow.com/questions/9984196/postgresql-gapless-sequences
    assert new_task_id > task_id

    __, js = req_single_scrapyd(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list'))
    assert task_id in js['ids'] and new_task_id in js['ids']
    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=task_id))
    assert js['data']['create_time'] == create_time
    assert js['data']['update_time'] == update_time
    check_dumped_task_data(js, day_of_week='mon-fri')
    sleep()
    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=new_task_id))
    check_dumped_task_data(js, day_of_week=day_of_week)
    req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE),
                       ins=["id: %s," % task_id, "day_of_week: 'mon-fri',",
                            "id: %s," % new_task_id, "day_of_week: '%s'," % day_of_week])
    req_single_scrapyd(app, client, view='tasks.xhr',
                       kws=dict(node=NODE, action='delete', task_id=new_task_id),
                       jskws=dict(status=cst.OK, tip="Task #%s deleted" % new_task_id))

# POST data contains "task_id": "1", "replace_existing": "False",
Example #18
Source File: test_tasks_single_scrapyd.py From scrapydweb with GNU General Public License v3.0

def test_edit_to_update_a_task(app, client):
    day_of_week = 'mon-fri'
    task_id = metadata['task_id']
    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=task_id))
    create_time = js['data']['create_time']
    update_time = js['data']['update_time']

    data = dict(DATA)
    data.update(task_id=task_id, day_of_week=day_of_week)  # modify day_of_week only
    req_single_scrapyd(app, client, view='schedule.check', kws=dict(node=NODE), data=data,
                       jskws=dict(cmd="-d _version=%s" % cst.VERSION, filename=FILENAME))
    text, __ = req_single_scrapyd(app, client, view='schedule.run', kws=dict(node=NODE),
                                  data=dict(filename=FILENAME), location=metadata['location'])
    assert int(re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text)).group(1)) == task_id

    __, js = req_single_scrapyd(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list'))
    assert task_id in js['ids'] and (task_id + 1) not in js['ids']
    sleep()
    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=task_id))
    assert js['data']['create_time'] == create_time
    assert js['data']['update_time'] > update_time
    check_dumped_task_data(js, day_of_week=day_of_week)
    req_single_scrapyd(app, client, view='tasks.xhr',
                       kws=dict(node=NODE, action='dump', task_id=task_id + 1),
                       jskws=dict(data=None, status=cst.ERROR,
                                  message="Task #%s not found" % (task_id + 1)))
    req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE),
                       ins=["id: %s," % task_id, "day_of_week: '%s'," % day_of_week],
                       nos="id: %s," % (task_id + 1))
Example #19
Source File: client.py From python-saml with MIT License

def receive(method, query_string, body):
    # Determine the protocol used and parse the appropriate data.
    method = method.upper()
    if method == 'GET':
        data = parse_qs(_text(query_string))
        binding = 'artifact' if 'SAMLArtifact' in data else 'redirect'

    elif method == 'POST':
        data = parse_qs(_text(body))
        binding = 'post'

    else:
        # Unknown method used.
        return None

    if binding in ('redirect', 'post'):
        # Pull the text out of the query.
        encoded = data.get('SAMLResponse', data.get('SAMLRequest'))

        if not encoded:
            # No SAML message found.
            return None

        # Decode the text.
        text = base64.b64decode(encoded[0])
        if binding == "redirect":
            text = zlib.decompress(text, -15)

        # Parse the text into xml.
        message = etree.XML(text)

        # Get the relay state if present.
        relay_state = data.get('RelayState')
        if relay_state:
            relay_state = unquote_plus(relay_state[0])

        # Return the message and the relay state.
        return message, relay_state
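For the 'redirect' branch above, the wire format is deflate-compressed, base64-encoded XML (the SAML HTTP-Redirect binding), with RelayState carried as a separate url-encoded value. A rough round trip of just those decoding steps, encoding a toy message ourselves:

import base64
import zlib
from six.moves.urllib.parse import unquote_plus

xml = b'<samlp:Response ID="_example"/>'  # hypothetical message
compressor = zlib.compressobj(9, zlib.DEFLATED, -15)  # raw DEFLATE, no zlib header
wire = base64.b64encode(compressor.compress(xml) + compressor.flush())

text = zlib.decompress(base64.b64decode(wire), -15)  # mirrors the 'redirect' branch
print(text == xml)                      # True
print(unquote_plus('next%3D%2Fhome'))   # next=/home  (RelayState handling)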
Example #20
Source File: transportkeys.py From sgx-kms with Apache License 2.0

def on_get(self, external_project_id, **kw):
    LOG.debug('Start transport_keys on_get')

    plugin_name = kw.get('plugin_name', None)
    if plugin_name is not None:
        plugin_name = parse.unquote_plus(plugin_name)

    result = self.repo.get_by_create_date(
        plugin_name=plugin_name,
        offset_arg=kw.get('offset', 0),
        limit_arg=kw.get('limit', None),
        suppress_exception=True
    )

    transport_keys, offset, limit, total = result

    if not transport_keys:
        transport_keys_resp_overall = {'transport_keys': [],
                                       'total': total}
    else:
        transport_keys_resp = [
            hrefs.convert_transport_key_to_href(s.id)
            for s in transport_keys
        ]
        transport_keys_resp_overall = hrefs.add_nav_hrefs(
            'transport_keys', offset, limit, total,
            {'transport_keys': transport_keys_resp}
        )
        transport_keys_resp_overall.update({'total': total})

    return transport_keys_resp_overall
Example #21
Source File: cas.py From sgx-kms with Apache License 2.0

def on_get(self, external_project_id, **kw):
    LOG.debug('Start certificate_authorities on_get (limited)')

    plugin_name = kw.get('plugin_name')
    if plugin_name is not None:
        plugin_name = parse.unquote_plus(plugin_name)

    plugin_ca_id = kw.get('plugin_ca_id', None)
    if plugin_ca_id is not None:
        plugin_ca_id = parse.unquote_plus(plugin_ca_id)

    # refresh CA table, in case plugin entries have expired
    cert_resources.refresh_certificate_resources()

    project_model = res.get_or_create_project(external_project_id)

    if self._project_cas_defined(project_model.id):
        cas, offset, limit, total = self._get_subcas_and_project_cas(
            offset=kw.get('offset', 0),
            limit=kw.get('limit', None),
            plugin_name=plugin_name,
            plugin_ca_id=plugin_ca_id,
            project_id=project_model.id)
    else:
        cas, offset, limit, total = self._get_subcas_and_root_cas(
            offset=kw.get('offset', 0),
            limit=kw.get('limit', None),
            plugin_name=plugin_name,
            plugin_ca_id=plugin_ca_id,
            project_id=project_model.id)

    return self._display_cas(cas, offset, limit, total)
Example #22
Source File: url.py From pipenv with MIT License

def _parse_fragment(self):
    # type: () -> URI
    subdirectory = self.subdirectory if self.subdirectory else ""
    fragment = self.fragment if self.fragment else ""
    if self.fragment is None:
        return self
    fragments = self.fragment.split("&")
    fragment_items = {}
    name = self.name if self.name else ""
    extras = self.extras
    for q in fragments:
        key, _, val = q.partition("=")
        val = unquote_plus(val)
        fragment_items[key] = val
        if key == "egg":
            from .utils import parse_extras

            name, stripped_extras = pip_shims.shims._strip_extras(val)
            if stripped_extras:
                extras = tuple(parse_extras(stripped_extras))
        elif key == "subdirectory":
            subdirectory = val
    return attr.evolve(
        self,
        fragment_dict=fragment_items,
        subdirectory=subdirectory,
        fragment=fragment,
        extras=extras,
        name=name,
    )
Example #23
Source File: test_tasks.py From scrapydweb with GNU General Public License v3.0

def test_edit_task(app, client):
    task_id = metadata['task_id']
    # http://127.0.0.1:5000/1/schedule/?task_id=1
    req(app, client, view='schedule', kws=dict(node=NODE, task_id=task_id),
        ins=["checked />[1] %s" % app.config['SCRAPYD_SERVERS'][0],
             "checked />[2] %s" % app.config['SCRAPYD_SERVERS'][-1]])
    check_data_ = dict(check_data)
    check_data_.update(task_id=task_id, hour='6')
    req(app, client, view='schedule.check', kws=dict(node=NODE), data=check_data_,
        jskws=dict(cmd="-d _version=%s" % cst.VERSION, filename=FILENAME))
    with app.test_request_context():
        metadata['location'] = url_for('tasks', node=NODE)
    text, __ = req(app, client, view='schedule.run', kws=dict(node=NODE),
                   data=run_data_single_scrapyd, location=metadata['location'])
    m = re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text))
    assert int(m.group(1)) == task_id
    __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='dump', task_id=task_id))
    assert js['data']['selected_nodes'] == [1]
    sleep()

    req(app, client, view='tasks', kws=dict(node=NODE),
        ins=["fail_times: 1,", "run_times: 'FAIL 1 / 2',"])
    text, __ = req(app, client, view='tasks', kws=dict(node=NODE, task_id=task_id),
                   ins=["fail_count: 0,", "fail_count: 1,", "pass_count: 1,", ":total='2'"])
    with app.test_request_context():
        url_delete = url_for('tasks.xhr', node=NODE, action='delete', task_id=task_id)
    # in the task results page: url_action: '/1/tasks/xhr/delete/5/10/',
    new_task_result_id = int(re.search(r'%s(\d+)/' % url_delete, text).group(1))
    print("new_task_result_id: %s" % new_task_result_id)
    req(app, client, view='tasks',
        kws=dict(node=NODE, task_id=task_id, task_result_id=new_task_result_id),
        ins=["node: 1,", "server: '%s'," % app.config['SCRAPYD_SERVERS'][0],
             "status_code: 200,", "status: 'ok',", ":total='1'"])
    __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='dump', task_id=task_id))
    assert '06:00:00' in js['data']['apscheduler_job']['next_run_time']
    req(app, client, view='schedule', kws=dict(node=NODE, task_id=task_id),
        ins="checked />[1] %s" % app.config['SCRAPYD_SERVERS'][0],
        nos="checked />[2] %s" % app.config['SCRAPYD_SERVERS'][-1])
    # ['selected_nodes'] == [1] in test_edit_task() above

# switch between task_results.html and task_results_with_job.html
Example #24
Source File: test_tasks.py From scrapydweb with GNU General Public License v3.0

def test_task_start_execute_end(app, client):
    while True:
        now_datetime = datetime.now()
        if now_datetime.second % 10 != 1:
            sleep(1)
        else:
            break
    start_datetime = now_datetime + timedelta(seconds=8)
    first_execute_datetime = now_datetime + timedelta(seconds=9)
    second_execute_datetime = now_datetime + timedelta(seconds=14)
    end_datetime = now_datetime + timedelta(seconds=18)

    check_data_ = dict(check_data)
    check_data_.update(action='add', hour='*', minute='*', second='*/5',
                       start_date=start_datetime.strftime("%Y-%m-%d %H:%M:%S"),
                       end_date=end_datetime.strftime("%Y-%m-%d %H:%M:%S"))
    req(app, client, view='schedule.check', kws=dict(node=NODE), data=check_data_,
        jskws=dict(cmd="-d _version=%s" % cst.VERSION, filename=FILENAME))
    text, __ = req(app, client, view='schedule.run', kws=dict(node=NODE),
                   data=run_data_single_scrapyd, location=metadata['location'])
    m = re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text))
    task_id = int(m.group(1))
    print("task_id: %s" % task_id)
    with app.test_request_context():
        url_pause = url_for('tasks.xhr', node=NODE, action='pause', task_id=task_id)
        url_resume = url_for('tasks.xhr', node=NODE, action='resume', task_id=task_id)
        url_delete = url_for('tasks.xhr', node=NODE, action='delete', task_id=task_id)
        url_task_results = url_for('tasks', node=NODE, task_id=task_id)

    req(app, client, view='tasks', kws=dict(node=NODE),
        ins=[url_pause, url_task_results, "id: %s," % task_id,
             "prev_run_result: '%s'," % cst.NA, "run_times: 0,"],
        nos=[url_resume, url_delete])
    __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='dump', task_id=task_id))
    assert first_execute_datetime.strftime("%Y-%m-%d %H:%M:%S") in js['data']['apscheduler_job']['next_run_time']

    sleep(10)  # The first execution may or may not have finished
    req(app, client, view='tasks', kws=dict(node=NODE), ins=["id: %s," % task_id, "run_times: 1,"])
    req(app, client, view='tasks', kws=dict(node=NODE),
        ins=[url_pause, url_task_results, "id: %s," % task_id, "run_times: 1,"],
        nos=[url_resume, url_delete])
    req(app, client, view='tasks', kws=dict(node=NODE, task_id=task_id), ins=":total='1'")
    __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='dump', task_id=task_id))
    assert second_execute_datetime.strftime("%Y-%m-%d %H:%M:%S") in js['data']['apscheduler_job']['next_run_time']

    sleep(10)
    req(app, client, view='tasks', kws=dict(node=NODE), ins=["id: %s," % task_id, "run_times: 2,"])
    req(app, client, view='tasks', kws=dict(node=NODE),
        ins=[url_delete, url_task_results, "id: %s," % task_id,
             "next_run_time: '%s'," % cst.NA, "run_times: 2,"],
        nos=[url_pause, url_resume])
    req(app, client, view='tasks', kws=dict(node=NODE, task_id=task_id),
        ins=["status_code: 200,", "status: 'ok',", ":total='2'"],
        nos=["status_code: -1,", "status: 'error',"])
    __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='dump', task_id=task_id))
    assert js['data']['apscheduler_job'] is None

    req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='delete', task_id=task_id))

# Visit Timer Tasks: remove_apscheduler_job_without_task()
# execute_task(): if not task: apscheduler_job.remove()
Example #25
Source File: test_tasks.py From scrapydweb with GNU General Public License v3.0

def test_delete_task_or_task_result_on_the_fly(app, client):
    for kind in ['delete_task', 'delete_task_result']:
        check_data_ = dict(check_data)
        req(app, client, view='schedule.check', kws=dict(node=NODE), data=check_data_,
            jskws=dict(cmd="-d _version=%s" % cst.VERSION, filename=FILENAME))
        with app.test_request_context():
            text, __ = req(app, client, view='schedule.run', kws=dict(node=NODE), data=run_data,
                           location=url_for('tasks', node=NODE))
            m = re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text))
            task_id = int(m.group(1))
            print("task_id: %s" % task_id)
        __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='dump', task_id=task_id))
        assert js['data']['selected_nodes'] == [1, 2]

        sleep(2)  # the first execution has not finished yet
        __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list', task_id=task_id))
        assert len(js['ids']) == 1
        task_result_id = js['ids'][0]
        __, js = req(app, client, view='tasks.xhr',
                     kws=dict(node=NODE, action='list', task_id=task_id, task_result_id=task_result_id))
        assert len(js['ids']) == 1

        if kind == 'delete_task':
            req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='delete', task_id=task_id))
        else:
            req(app, client, view='tasks.xhr',
                kws=dict(node=NODE, action='delete', task_id=task_id, task_result_id=task_result_id))
        __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list'))
        if kind == 'delete_task':
            assert task_id not in js['ids']
        else:
            assert task_id in js['ids']
        __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list', task_id=task_id))
        assert len(js['ids']) == 0
        __, js = req(app, client, view='tasks.xhr',
                     kws=dict(node=NODE, action='list', task_id=task_id, task_result_id=task_result_id))
        assert len(js['ids']) == 0

        sleep(28)
        req(app, client, view='tasks.xhr',
            kws=dict(node=NODE, action='delete', task_id=task_id, task_result_id=task_result_id))
        __, js = req(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list', task_id=task_id))
        assert len(js['ids']) == 0
        __, js = req(app, client, view='tasks.xhr',
                     kws=dict(node=NODE, action='list', task_id=task_id, task_result_id=task_result_id))
        assert len(js['ids']) == 0
        req(app, client, view='tasks.xhr', kws=dict(node=1, action='delete', task_id=task_id))
Example #26
Source File: test_tasks_single_scrapyd.py From scrapydweb with GNU General Public License v3.0

def test_run_with_task(app, client):
    # ScrapydWeb_demo.egg: custom_settings = {}, also output specific settings & arguments in the log
    upload_file_deploy(app, client, filename='ScrapydWeb_demo_no_request.egg', project=cst.PROJECT,
                       redirect_project=cst.PROJECT)
    with app.test_request_context():
        metadata['location'] = url_for('tasks', node=NODE)
    # 'schedule.check' in test_check_with_task()
    text, __ = req_single_scrapyd(app, client, view='schedule.run', kws=dict(node=NODE),
                                  data=dict(filename=FILENAME), location=metadata['location'])
    sleep()
    m = re.search(cst.TASK_NEXT_RUN_TIME_PATTERN, unquote_plus(text))
    task_id = int(m.group(1))
    next_run_time = m.group(2)
    print("task_id: %s" % task_id)
    print("next_run_time: %s" % next_run_time)
    metadata['task_id'] = task_id
    metadata['next_run_time'] = next_run_time

    __, js = req_single_scrapyd(app, client, view='tasks.xhr', kws=dict(node=NODE, action='list'))
    assert task_id in js['ids']
    __, js = req_single_scrapyd(app, client, view='tasks.xhr',
                                kws=dict(node=NODE, action='dump', task_id=task_id))
    check_dumped_task_data(js)

    text, __ = req_single_scrapyd(app, client, view='tasks', kws=dict(node=NODE))
    jobid = re.search(r'/(task_%s_[\w-]+?)/' % task_id, text).group(1)  # extract jobid from url_stats in tasks
    print("jobid: %s" % jobid)
    metadata['jobid'] = jobid
    ins = [
        'JOB: %s' % jobid,
        'USER_AGENT: Mozilla/5.0 (iPhone',
        'ROBOTSTXT_OBEY: True',
        'COOKIES_ENABLED: False',
        'CONCURRENT_REQUESTS: 5',
        'DOWNLOAD_DELAY: 10',
        'CLOSESPIDER_TIMEOUT: 120',
        'CLOSESPIDER_PAGECOUNT: 20',
        (u'self.arg1: %s' % VALUE).replace("'", '&#39;').replace('"', '&quot;')
    ]
    # In utf8 page: <div id="log"> [test] DEBUG: self.arg1: Test&#39; &quot;测试 </pre>
    # https://stackoverflow.com/questions/2087370/decode-html-entities-in-python-string
    req_single_scrapyd(app, client, view='log',
                       kws=dict(node=NODE, opt='utf8', project=cst.PROJECT,
                                spider=cst.SPIDER, job=jobid),
                       ins=ins)
    req_single_scrapyd(app, client, view='api',
                       kws=dict(node=NODE, opt='forcestop', project=cst.PROJECT,
                                version_spider_job=jobid))
Example #27
Source File: secrets.py From sgx-kms with Apache License 2.0

def on_get(self, external_project_id, **kw):
    def secret_fields(field):
        return putil.mime_types.augment_fields_with_content_types(field)

    LOG.debug('Start secrets on_get '
              'for project-ID %s:', external_project_id)

    name = kw.get('name', '')
    if name:
        name = parse.unquote_plus(name)

    bits = kw.get('bits', 0)
    try:
        bits = int(bits)
    except ValueError:
        # as per Github issue 171, if bits is invalid then
        # the default should be used.
        bits = 0

    ctxt = controllers._get_barbican_context(pecan.request)
    user_id = None
    if ctxt:
        user_id = ctxt.user

    result = self.secret_repo.get_by_create_date(
        external_project_id,
        offset_arg=kw.get('offset', 0),
        limit_arg=kw.get('limit', None),
        name=name,
        alg=kw.get('alg'),
        mode=kw.get('mode'),
        bits=bits,
        suppress_exception=True,
        acl_only=kw.get('acl_only', None),
        user_id=user_id
    )

    secrets, offset, limit, total = result

    if not secrets:
        secrets_resp_overall = {'secrets': [],
                                'total': total}
    else:
        secrets_resp = [
            hrefs.convert_to_hrefs(secret_fields(s))
            for s in secrets
        ]
        secrets_resp_overall = hrefs.add_nav_hrefs(
            'secrets', offset, limit, total,
            {'secrets': secrets_resp}
        )
        secrets_resp_overall.update({'total': total})

    LOG.info(u._LI('Retrieved secret list for project: %s'),
             external_project_id)
    return secrets_resp_overall
Example #28
Source File: glyph.py From graphite-api with Apache License 2.0

def __init__(self, **params):
    self.params = params
    self.data = params['data']
    self.dataLeft = []
    self.dataRight = []
    self.secondYAxis = False
    self.width = int(params.get('width', 200))
    self.height = int(params.get('height', 200))
    self.margin = int(params.get('margin', 10))
    self.userTimeZone = params.get('tz')
    self.logBase = params.get('logBase', None)
    self.minorY = int(params.get('minorY', 1))

    if self.logBase:
        if self.logBase == 'e':
            self.logBase = math.e
        elif self.logBase <= 1:
            self.logBase = None
            params['logBase'] = None
        else:
            self.logBase = float(self.logBase)

    if self.margin < 0:
        self.margin = 10

    self.setupCairo(params.get('outputFormat', 'png').lower())

    self.area = {
        'xmin': self.margin + 10,  # Need extra room when the time is near the left edge
        'xmax': self.width - self.margin,
        'ymin': self.margin,
        'ymax': self.height - self.margin,
    }

    self.loadTemplate(params.get('template', 'default'))

    opts = self.ctx.get_font_options()
    opts.set_antialias(cairo.ANTIALIAS_NONE)
    self.ctx.set_font_options(opts)

    self.foregroundColor = params.get('fgcolor', self.defaultForeground)
    self.backgroundColor = params.get('bgcolor', self.defaultBackground)

    self.setColor(self.backgroundColor)
    self.drawRectangle(0, 0, self.width, self.height)

    if 'colorList' in params:
        colorList = unquote_plus(str(params['colorList'])).split(',')
    else:
        colorList = self.defaultColorList
    self.colors = itertools.cycle(colorList)

    self.drawGraph(**params)
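The colorList handling at the end shows a typical reason for unquote_plus(): the list arrives as a single url-encoded query value, so encoded commas must be decoded before splitting. A small sketch with arbitrary colors:

import itertools
from six.moves.urllib.parse import unquote_plus

raw = 'blue%2C%23aa0000%2Cgreen'  # 'blue,#aa0000,green' url-encoded (hypothetical value)
colorList = unquote_plus(str(raw)).split(',')
colors = itertools.cycle(colorList)
print([next(colors) for _ in range(5)])  # ['blue', '#aa0000', 'green', 'blue', '#aa0000']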