Python six.moves.urllib.request.pathname2url() Examples
The following are 29 code examples of six.moves.urllib.request.pathname2url().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module six.moves.urllib.request, or try the search function.
Example #1
Source File: plotting.py From pycbc with GNU General Public License v3.0 | 6 votes |
def make_veto_table(workflow, out_dir, vetodef_file=None, tags=None):
    """ Creates a node in the workflow for writing the veto_definer
    table. Returns a File instance for the output file.

    If no veto-definer file is passed in and none is configured in the
    [workflow-segments] section, returns None.
    """
    if vetodef_file is None:
        # No file supplied: fall back to the workflow configuration.
        if not workflow.cp.has_option_tags("workflow-segments",
                                           "segments-veto-definer-file", []):
            return None
        vetodef_file = workflow.cp.get_opt_tags("workflow-segments",
                                                "segments-veto-definer-file",
                                                [])
        # Register the configured path as a local file: URL PFN.
        file_url = urljoin('file:', pathname2url(vetodef_file))
        vdf_file = File(workflow.ifos, 'VETO_DEFINER',
                        workflow.analysis_time, file_url=file_url)
        vdf_file.PFN(file_url, site='local')
    else:
        # Caller supplied a ready-made File object; use it directly.
        vdf_file = vetodef_file

    if tags is None:
        tags = []
    makedir(out_dir)
    # Build the page_vetotable node that renders the HTML table.
    node = PlotExecutable(workflow.cp, 'page_vetotable', ifos=workflow.ifos,
                          out_dir=out_dir, tags=tags).create_node()
    node.add_input_opt('--veto-definer-file', vdf_file)
    node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
    workflow += node
    return node.output_files[0]
Example #2
Source File: ingest.py From pyramid_swagger with BSD 3-Clause "New" or "Revised" License | 6 votes |
def get_swagger_spec(settings):
    """Return a :class:`bravado_core.spec.Spec` constructed from
    the swagger specs in `pyramid_swagger.schema_directory`. If
    `pyramid_swagger.enable_swagger_spec_validation` is enabled the schema
    will be validated before returning it.

    :param settings: a pyramid registry settings with configuration for
        building a swagger schema
    :type settings: dict
    :rtype: :class:`bravado_core.spec.Spec`
    """
    schema_dir = settings.get('pyramid_swagger.schema_directory', 'api_docs/')
    schema_filename = settings.get('pyramid_swagger.schema_file',
                                   'swagger.json')
    schema_path = os.path.join(schema_dir, schema_filename)
    # Absolute file: URL so bravado's file handler can fetch the spec.
    schema_url = urlparse.urljoin(
        'file:', pathname2url(os.path.abspath(schema_path)))

    handlers = build_http_handlers(None)  # don't need http_client for file:
    file_handler = handlers['file']
    spec_dict = file_handler(schema_url)

    return Spec.from_dict(
        spec_dict,
        config=create_bravado_core_config(settings),
        origin_url=schema_url)
Example #3
Source File: spec.py From pyramid_swagger with BSD 3-Clause "New" or "Revised" License | 6 votes |
def validate_swagger_schema(schema_dir, resource_listing):
    """Validate the structure of Swagger schemas against the spec.

    **Valid only for Swagger v1.2 spec**

    Note: It is possible that resource_listing is not present in
    the schema_dir. The path is passed in the call so that ssv
    can fetch the api-declaration files from the path.

    :param resource_listing: Swagger Spec v1.2 resource listing
    :type resource_listing: dict
    :param schema_dir: A path to Swagger spec directory
    :type schema_dir: string
    :raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
    """
    schema_filepath = os.path.join(schema_dir, API_DOCS_FILENAME)
    # The validator needs a URL base so it can resolve the referenced
    # api-declaration files relative to the resource listing.
    swagger_spec_validator.validator12.validate_spec(
        resource_listing,
        urlparse.urljoin('file:', pathname2url(os.path.abspath(schema_filepath))),
    )
Example #4
Source File: data_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    # Build a small text file, then tar.gz and zip archives of it, to act
    # as download fixtures served via file:// URLs.
    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    # Fetch with untar, then re-fetch with hash checks (md5 and sha256).
    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    # Repeat the same checks with the zip archive and extract=True.
    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip')
Example #5
Source File: segment.py From pycbc with GNU General Public License v3.0 | 5 votes |
def add_cumulative_files(workflow, output_file, input_files, out_dir,
                         execute_now=False, tags=None):
    """ Function to combine a set of segment files into a single one.
    This function will not merge the segment lists but keep each separate.

    Parameters
    -----------
    workflow: pycbc.workflow.core.Workflow
        An instance of the Workflow class that manages the workflow.
    output_file: pycbc.workflow.core.File
        The output file object
    input_files: pycbc.workflow.core.FileList
        This list of input segment files
    out_dir : path
        The directory to write output to.
    execute_now : boolean, optional
        If true, jobs are executed immediately. If false, they are added to
        the workflow to be run later.
    tags : list of strings, optional
        A list of strings that is used to identify this job
    """
    if tags is None:
        tags = []
    llwadd_job = LigolwAddExecutable(workflow.cp, 'llwadd',
                                     ifo=output_file.ifo_list,
                                     out_dir=out_dir, tags=tags)
    add_node = llwadd_job.create_node(output_file.segment, input_files,
                                      output=output_file)
    if file_needs_generating(add_node.output_files[0].cache_entry.path,
                             workflow.cp, tags=tags):
        if execute_now:
            workflow.execute_node(add_node)
        else:
            workflow.add_node(add_node)
    else:
        # Output already exists on disk: mark the node as executed and
        # register local file: PFNs instead of re-running the job.
        add_node.executed = True
        for fil in add_node._outputs:
            fil.node = None
            fil.PFN(urljoin('file:', pathname2url(fil.storage_path)),
                    site='local')
    return add_node.output_files[0]
Example #6
Source File: core.py From pycbc with GNU General Public License v3.0 | 5 votes |
def execute_node(self, node, verbatim_exe = False):
    """ Execute this node immediately on the local machine
    """
    node.executed = True

    # Check that the PFN is for a file or path
    if node.executable.needs_fetching:
        try:
            # The pfn may have been marked local...
            pfn = node.executable.get_pfn()
        except:
            # or it may have been marked nonlocal.  That's
            # fine, we'll resolve the URL and make a local
            # entry.
            pfn = node.executable.get_pfn('nonlocal')
        # Fetch the executable and make it user read/write/execute only.
        resolved = resolve_url(
            pfn,
            permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        node.executable.clear_pfns()
        node.executable.add_pfn(urljoin('file:', pathname2url(resolved)),
                                site='local')

    cmd_list = node.get_command_line()

    # Must execute in output directory.
    curr_dir = os.getcwd()
    out_dir = node.executable.out_dir
    os.chdir(out_dir)

    # Make call
    make_external_call(cmd_list,
                       out_dir=os.path.join(out_dir, 'logs'),
                       out_basename=node.executable.name)

    # Change back
    os.chdir(curr_dir)

    # Register local file: PFNs for everything the node produced.
    for fil in node._outputs:
        fil.node = None
        fil.PFN(urljoin('file:', pathname2url(fil.storage_path)),
                site='local')
Example #7
Source File: pegasus_workflow.py From pycbc with GNU General Public License v3.0 | 5 votes |
def from_path(cls, path):
    """Build a File whose PFN is *path*.

    A path with no scheme (or a ``file:`` scheme) that names an existing
    local file is converted to an absolute ``file:`` URL and registered
    at the ``local`` site; anything else is registered as ``nonlocal``.
    """
    parts = urlsplit(path)
    site = 'nonlocal'
    looks_local = parts.scheme in ('', 'file')
    if looks_local and os.path.isfile(parts.path):
        abs_path = os.path.abspath(parts.path)
        path = urljoin('file:', pathname2url(abs_path))
        site = 'local'
    result = File(os.path.basename(path))
    result.PFN(path, site)
    return result
Example #8
Source File: grb_utils.py From pycbc with GNU General Public License v3.0 | 5 votes |
def get_ipn_sky_files(workflow, file_url, tags=None):
    '''
    Retrieve the sky point files for searching over the IPN error box and
    populating it with injections.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    file_url : string
        The URL of the IPN sky points file.
    tags : list of strings
        If given these tags are used to uniquely name and identify output
        files that would be produced in multiple calls to this function.

    Returns
    --------
    sky_points_file : pycbc.workflow.core.File
        File object representing the IPN sky points file.
    '''
    tags = tags or []
    # Download (or locate) the sky points file, then wrap it in a File
    # object with a local file: PFN.
    ipn_sky_points = resolve_url(file_url)
    sky_points_url = urljoin("file:", pathname2url(ipn_sky_points))
    sky_points_file = File(workflow.ifos, "IPN_SKY_POINTS",
                           workflow.analysis_time, file_url=sky_points_url,
                           tags=tags)
    sky_points_file.PFN(sky_points_url, site="local")
    return sky_points_file
Example #9
Source File: schemas.py From openapi-spec-validator with Apache License 2.0 | 5 votes |
def get_openapi_schema(version):
    """Load the bundled OpenAPI JSON schema for *version*.

    Returns a ``(schema, schema_url)`` pair, where ``schema_url`` is a
    ``file:`` URL pointing at the schema file on disk.
    """
    rel_path = 'resources/schemas/v{0}/schema.json'.format(version)
    resource_path = resource_filename('openapi_spec_validator', rel_path)
    full_path = os.path.join(os.path.dirname(__file__), resource_path)
    schema_url = parse.urljoin('file:', request.pathname2url(full_path))
    return read_yaml_file(full_path), schema_url
Example #10
Source File: cli.py From http-prompt with MIT License | 5 votes |
def normalize_url(ctx, param, value):
    """Click callback that turns a bare filesystem path into a file: URL.

    Values that already carry a ``scheme://`` prefix pass through
    untouched; empty or missing values yield ``None``.
    """
    if not value:
        return None
    has_scheme = re.search(r'^\w+://', value) is not None
    if has_scheme:
        return value
    return 'file:' + pathname2url(os.path.abspath(value))
Example #11
Source File: file_utils.py From mlflow with Apache License 2.0 | 5 votes |
def path_to_local_sqlite_uri(path):
    """Convert a local filesystem path into a SQLite database URI."""
    url_path = pathname2url(os.path.abspath(path))
    normalized = posixpath.abspath(url_path)
    # Windows URL-paths already start with a slash-drive prefix, so the
    # shorter scheme prefix is used there.
    if sys.platform == "win32":
        return "sqlite://" + normalized
    return "sqlite:///" + normalized
Example #12
Source File: file_utils.py From mlflow with Apache License 2.0 | 5 votes |
def path_to_local_file_uri(path):
    """Convert a local filesystem path into a ``file:`` URI.

    Absolute paths get the ``file://`` authority form; relative paths
    keep the bare ``file:`` form.
    """
    url_path = pathname2url(path)
    is_absolute = url_path == posixpath.abspath(url_path)
    template = "file://{path}" if is_absolute else "file:{path}"
    return template.format(path=url_path)
Example #13
Source File: file_utils.py From mlflow with Apache License 2.0 | 5 votes |
def relative_path_to_artifact_path(path):
    """Translate a relative local path into an artifact path.

    On POSIX the two notations already coincide; elsewhere the path is
    URL-converted and unquoted.  Absolute paths are rejected.
    """
    on_posix = os.path == posixpath
    if on_posix:
        return path
    is_absolute = os.path.abspath(path) == path
    if is_absolute:
        raise Exception("This method only works with relative paths.")
    return unquote(pathname2url(path))
Example #14
Source File: utils.py From shade with Apache License 2.0 | 5 votes |
def normalise_file_path_to_url(path):
    """Return *path* as a URL, converting bare paths to file: URLs.

    Anything that already parses with a URL scheme is returned unchanged.
    """
    already_url = bool(parse.urlparse(path).scheme)
    if already_url:
        return path
    absolute = os.path.abspath(path)
    return parse.urljoin('file:', request.pathname2url(absolute))
Example #15
Source File: url.py From Cloudmare with GNU General Public License v3.0 | 5 votes |
def path_to_file_uri(path):
    """Convert a local filesystem path to a legal File URI.

    Described in: http://en.wikipedia.org/wiki/File_URI_scheme
    """
    quoted = pathname2url(os.path.abspath(path))
    if os.name == 'nt':
        # Work around http://bugs.python.org/issue5861 on Windows.
        quoted = quoted.replace('|', ':')
    return 'file:///' + quoted.lstrip('/')
Example #16
Source File: data_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    # Build a small text file, then tar.gz and zip archives of it, to act
    # as download fixtures served via file:// URLs.
    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    # Fetch with untar, then re-fetch with hash checks (md5 and sha256).
    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    # Repeat the same checks with the zip archive and extract=True.
    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip')
Example #17
Source File: data_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    # Build a small text file, then tar.gz and zip archives of it, to act
    # as download fixtures served via file:// URLs.
    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    # Fetch with untar, then re-fetch with hash checks (md5 and sha256).
    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    # Repeat the same checks with the zip archive and extract=True.
    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip')
Example #18
Source File: data_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    # Build a small text file, then tar.gz and zip archives of it, to act
    # as download fixtures served via file:// URLs.
    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    # Fetch with untar, then re-fetch with hash checks (md5 and sha256).
    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    # Repeat the same checks with the zip archive and extract=True.
    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip')
Example #19
Source File: data_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    # Build a small text file, then tar.gz and zip archives of it, to act
    # as download fixtures served via file:// URLs.
    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    # Fetch with untar, then re-fetch with hash checks (md5 and sha256).
    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    # Repeat the same checks with the zip archive and extract=True.
    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip')
Example #20
Source File: data_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    # Build a small text file, then tar.gz and zip archives of it, to act
    # as download fixtures served via file:// URLs.
    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    # Fetch with untar, then re-fetch with hash checks (md5 and sha256).
    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    # Repeat the same checks with the zip archive and extract=True.
    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip')
Example #21
Source File: data_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    # Build a small text file, then tar.gz and zip archives of it, to act
    # as download fixtures served via file:// URLs.
    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    # Fetch with untar, then re-fetch with hash checks (md5 and sha256).
    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    # Repeat the same checks with the zip archive and extract=True.
    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip')
Example #22
Source File: api.py From pyramid_swagger with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _build_swagger_20_schema_views(config):
    """Yield a PyramidEndpoint for every file referenced by the Swagger 2.0
    spec, in both yaml and json renderings.

    All endpoints share one view; the view uses the request path to look up
    which spec file to serve via the ``file_map`` closure.
    """
    spec = config.registry.settings['pyramid_swagger.schema20']

    walker = NodeWalkerForRefFiles()
    all_files = walker.walk(spec)

    # Maps served URL path -> spec file name; populated by the loop below
    # and read by the shared view at request time.
    file_map = {}

    def view_for_swagger_schema(request):
        # Requested extension selects the output rendering (yaml/json).
        _, ext = os.path.splitext(request.path)
        ext = ext.lstrip('.')
        base_path = config.registry.settings\
            .get('pyramid_swagger.base_path_api_docs', '').rstrip('/')
        key_path = request.path_info[len(base_path):]
        actual_fname = file_map[key_path]
        with spec.resolver.resolving(actual_fname) as spec_dict:
            clean_response = strip_xscope(spec_dict)
            ref_walker = NodeWalkerForCleaningRefs()
            fixed_spec = ref_walker.walk(clean_response, ext)
            return fixed_spec

    for ref_fname in all_files:
        ref_fname_parts = os.path.splitext(pathname2url(ref_fname))
        for schema_format in ['yaml', 'json']:
            route_name = 'pyramid_swagger.swagger20.api_docs.{0}.{1}'\
                .format(ref_fname.replace('/', '.'), schema_format)
            path = '/{0}.{1}'.format(ref_fname_parts[0], schema_format)
            file_map[path] = ref_fname
            yield PyramidEndpoint(
                path=path,
                route_name=route_name,
                view=view_for_swagger_schema,
                renderer=schema_format,
            )
Example #23
Source File: util.py From jsonmapping with MIT License | 5 votes |
def fixture_uri(path):
    """Load a JSON fixture and return ``(data, base_uri)``.

    ``base_uri`` is the file:// URL of the fixture on disk.
    """
    full_path = os.path.join(fixtures_dir, path)
    uri = 'file://' + pathname2url(full_path)
    with open(full_path, 'r') as handle:
        return json.load(handle), uri
Example #24
Source File: url.py From learn_python3_spider with MIT License | 5 votes |
def path_to_file_uri(path):
    """Convert a local filesystem path to a legal File URI.

    Reference: http://en.wikipedia.org/wiki/File_URI_scheme
    """
    url_path = pathname2url(os.path.abspath(path))
    if os.name == 'nt':
        # See http://bugs.python.org/issue5861 for the drive-letter quirk.
        url_path = url_path.replace('|', ':')
    return 'file:///{0}'.format(url_path.lstrip('/'))
Example #25
Source File: utils.py From eclcli with Apache License 2.0 | 5 votes |
def normalise_file_path_to_url(path):
    """Normalise *path* to a URL; bare paths become absolute file: URLs."""
    if parse.urlparse(path).scheme:
        # Already a URL of some kind; pass it through unchanged.
        return path
    return parse.urljoin('file:',
                         request.pathname2url(os.path.abspath(path)))
Example #26
Source File: test_provision.py From tox with MIT License | 5 votes |
def space_path2url(path):
    """Return *path* unchanged unless it contains spaces, in which case
    an escaped absolute ``file:`` URL is returned instead."""
    text = str(path)
    if " " in text:
        return urljoin("file:", pathname2url(os.path.abspath(text)))
    return text
Example #27
Source File: psdfiles.py From pycbc with GNU General Public License v3.0 | 4 votes |
def setup_psd_pregenerated(workflow, tags=None):
    '''
    Setup CBC workflow to use pregenerated psd files.
    The file given in cp.get('workflow','pregenerated-psd-file-(ifo)') will
    be used as the --psd-file argument to geom_nonspinbank, geom_aligned_bank
    and pycbc_plot_psd_file.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output
        files that would be produced in multiple calls to this function.

    Returns
    --------
    psd_files : pycbc.workflow.core.FileList
        The FileList holding the gating files
    '''
    if tags is None:
        tags = []
    psd_files = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_PSD"

    # Check for one psd for all ifos
    try:
        pre_gen_file = cp.get_opt_tags('workflow-psd',
                                       'psd-pregenerated-file', tags)
        pre_gen_file = resolve_url(pre_gen_file)
        file_url = urljoin('file:', pathname2url(pre_gen_file))
        curr_file = File(workflow.ifos, user_tag, global_seg, file_url,
                         tags=tags)
        curr_file.PFN(file_url, site='local')
        psd_files.append(curr_file)
    except ConfigParser.Error:
        # Check for one psd per ifo
        for ifo in workflow.ifos:
            try:
                pre_gen_file = cp.get_opt_tags('workflow-psd',
                                'psd-pregenerated-file-%s' % ifo.lower(),
                                tags)
                pre_gen_file = resolve_url(pre_gen_file)
                file_url = urljoin('file:', pathname2url(pre_gen_file))
                curr_file = File(ifo, user_tag, global_seg, file_url,
                                 tags=tags)
                curr_file.PFN(file_url, site='local')
                psd_files.append(curr_file)
            except ConfigParser.Error:
                # It's unlikely, but not impossible, that only some ifos
                # will have pregenerated PSDs
                logging.warn("No psd file specified for IFO %s." % (ifo,))
                pass

    return psd_files
Example #28
Source File: core.py From pycbc with GNU General Public License v3.0 | 4 votes |
def from_segment_list_dict(cls, description, segmentlistdict,
                           ifo_list=None, valid_segment=None,
                           file_exists=False, seg_summ_dict=None,
                           **kwargs):
    """ Initialize a SegFile object from a segmentlistdict.

    Parameters
    ------------
    description : string (required)
        See File.__init__
    segmentlistdict : ligo.segments.segmentslistdict
        See SegFile.__init__
    ifo_list : string or list (optional)
        See File.__init__, if not given a list of all ifos in the
        segmentlistdict object will be used
    valid_segment : ligo.segments.segment or ligo.segments.segmentlist
        See File.__init__, if not given the extent of all segments in the
        segmentlistdict is used.
    file_exists : boolean (default = False)
        If provided and set to True it is assumed that this file already
        exists on disk and so there is no need to write again.
    seg_summ_dict : ligo.segments.segmentslistdict
        Optional. See SegFile.__init__.
    """
    if ifo_list is None:
        # Derive the ifo list from "IFO:NAME"-style keys.
        ifo_set = set([i.split(':')[0] for i in segmentlistdict.keys()])
        ifo_list = list(ifo_set)
        ifo_list.sort()
    if valid_segment is None:
        if seg_summ_dict and \
                numpy.any([len(v) for _, v in seg_summ_dict.items()]):
            # Only come here if seg_summ_dict is supplied and it is
            # not empty.
            valid_segment = seg_summ_dict.extent_all()
        else:
            try:
                valid_segment = segmentlistdict.extent_all()
            except:
                # Numpty probably didn't supply a glue.segmentlistdict
                segmentlistdict = segments.segmentlistdict(segmentlistdict)
                try:
                    valid_segment = segmentlistdict.extent_all()
                except ValueError:
                    # No segment_summary and segment list is empty
                    # Setting valid segment now is hard!
                    warn_msg = "No information with which to set valid "
                    warn_msg += "segment."
                    logging.warn(warn_msg)
                    valid_segment = segments.segment([0,1])
    instnc = cls(ifo_list, description, valid_segment,
                 segment_dict=segmentlistdict, seg_summ_dict=seg_summ_dict,
                 **kwargs)
    if not file_exists:
        instnc.to_segment_xml()
    else:
        # File already on disk: just register a local file: PFN.
        instnc.PFN(urljoin('file:', pathname2url(instnc.storage_path)),
                   site='local')
    return instnc
Example #29
Source File: external_psc_sso_workflow.py From vsphere-automation-sdk-python with MIT License | 4 votes |
def run(self):
    """Log in to a vCenter server that uses an external Platform Services
    Controller: discover the SSO and vAPI endpoints via the lookup
    service, obtain a SAML bearer token, create a vAPI client, and verify
    the connection by creating and deleting a tag category.
    """
    print('\n\n#### Example: Login to vCenter server with '
          'external Platform Services Controller')

    print('\nStep 1: Connect to the lookup service on the '
          'Platform Services Controller node: {0}'.format(self.lsurl))

    # Convert wsdl path to url
    self.lswsdl = parse.urljoin('file:', request.pathname2url(self.lswsdl))

    lookupservicehelper = LookupServiceHelper(
        wsdl_url=self.lswsdl,
        soap_url=self.lsurl,
        skip_verification=self.skip_verification)
    lookupservicehelper.connect()

    # Resolve the management node either from the default or by name.
    if self.mgmtinstancename is None:
        self.mgmtinstancename, self.mgmtnodeid = \
            lookupservicehelper.get_default_mgmt_node()
    elif self.mgmtnodeid is None:
        self.mgmtnodeid = lookupservicehelper.get_mgmt_node_id(
            self.mgmtinstancename)
    assert self.mgmtnodeid is not None

    print('\nStep 2: Discover the Single Sign-On service URL'
          ' from lookup service.')
    sso_url = lookupservicehelper.find_sso_url()
    print('Sso URL: {0}'.format(sso_url))

    print('\nStep 3: Connect to the Single Sign-On URL and '
          'retrieve the SAML bearer token.')
    authenticator = sso.SsoAuthenticator(sso_url)
    context = None
    if self.skip_verification:
        context = get_unverified_context()
    bearer_token = authenticator.get_bearer_saml_assertion(
        self.username,
        self.password,
        delegatable=True,
        ssl_context=context)

    print('\nStep 4. Discover the vAPI service URL from lookup service.')
    vapi_url = lookupservicehelper.find_vapi_url(self.mgmtnodeid)
    print('vAPI URL: {0}'.format(vapi_url))

    print('\nStep 5. Login to vAPI service using the SAML bearer token.')
    session = get_unverified_session() if self.skip_verification else None
    client = create_vsphere_client(
        server=parse.urlparse(vapi_url).hostname,
        bearer_token=bearer_token,
        session=session)

    # Create and Delete TagCategory to Verify connection is successful
    print('\nStep 3: Creating and Deleting Tag Category...\n')
    create_spec = client.tagging.Category.CreateSpec()
    create_spec.name = 'TestTag_embeded_psc_sso_workflow'
    create_spec.description = 'TestTagDesc'
    create_spec.cardinality = CategoryModel.Cardinality.MULTIPLE
    create_spec.associable_types = set()
    category_id = client.tagging.Category.create(create_spec)
    assert category_id is not None
    print('Tag category created; Id: {0}\n'.format(category_id))

    # Delete TagCategory
    client.tagging.Category.delete(category_id)