Python tables.NoSuchNodeError() Examples

The following are 7 code examples of tables.NoSuchNodeError(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module tables, or try the search function.
Example #1
Source File: loader_legacy.py    From FRETBursts with GNU General Public License v2.0 6 votes vote down vote up
def load_data(self, where, name, dest_name=None, ich=None,
                  allow_missing=False, ondisk=False):
        """Read node `where`/`name` from the HDF5 file into `self.data`.

        Arguments:
            where, name: HDF5 group path and node name passed to `get_node`.
            dest_name: key under which the value is stored in `self.data`;
                when None it is looked up in `hdf5_data_map` (falling back
                to `name`).
            ich: when not None, the value is stored as (or appended to) a
                per-channel list instead of a scalar entry.
            allow_missing: when True a missing node becomes an empty array;
                otherwise the file is closed and IOError is raised.
            ondisk: when True store the node object itself instead of
                reading it into memory.
        """
        try:
            node = self.h5file.get_node(where, name)
        except tables.NoSuchNodeError:
            if not allow_missing:
                # Fail fast on a malformed file; close the handle first.
                self.h5file.close()
                raise IOError("Invalid file format: '%s' is missing." % name)
            value = np.array([])
        else:
            value = node if ondisk else node.read()

        if dest_name is None:
            dest_name = hdf5_data_map.get(name, name)

        if ich is None:
            self.data.add(**{dest_name: value})
        elif dest_name not in self.data:
            # First channel seen: start the per-channel list.
            self.data.add(**{dest_name: [value]})
        else:
            self.data[dest_name].append(value)
Example #2
Source File: loader.py    From FRETBursts with GNU General Public License v2.0 6 votes vote down vote up
def _load_alex_periods_donor_acceptor(data, meas_specs):
    """Load optional D/A alternation-period definitions into `data`.

    Works for both us- and ns-ALEX measurements. The two period fields are
    optional in the file format: when they are missing only a warning is
    logged and `data` is left unchanged.
    """
    # Both us- and ns-ALEX
    try:
        # Try to load alex period definitions
        D_ON = meas_specs.alex_excitation_period1.read()
        A_ON = meas_specs.alex_excitation_period2.read()
    except tables.NoSuchNodeError:
        # But if it fails it's OK, those fields are optional
        # NOTE: fixed typos in the user-facing message
        # ("defintion" -> "definition", duplicated "excitation").
        msg = """
        The current file lacks the alternation period definition.
        You will need to manually add this info using:

          d.add(D_ON=D_ON, A_ON=A_ON)

        where `d` is a Data object and D_ON/A_ON is a tuple with start/stop
        values defining the D/A excitation period. Values are in
        raw timestamps units.
        """
        log.warning(msg)
    else:
        # Only store the periods when both reads succeeded.
        data.add(D_ON=D_ON, A_ON=A_ON)
Example #3
Source File: process.py    From scanorama with MIT License 5 votes vote down vote up
def load_h5(fname, genome='mm10'):
    """Load a 10x Genomics HDF5 count matrix.

    Arguments:
        fname: path to the .h5 file.
        genome: name of the genome group inside the file (e.g. 'mm10').

    Returns:
        (X, genes): a cells-by-genes `csr_matrix` and a numpy array of
        gene identifiers.

    Raises:
        Exception: when the genome group or a required dataset is missing.
    """
    try:
        import tables
    except ImportError:
        sys.stderr.write('Please install PyTables to read .h5 files: '
                         'https://www.pytables.org/usersguide/installation.html\n')
        # Use sys.exit, not the site-module `exit()` helper, which is not
        # guaranteed to exist (e.g. under `python -S` or frozen builds).
        sys.exit(1)

    # Adapted from scanpy's read_10x_h5() method.
    with tables.open_file(str(fname), 'r') as f:
        try:
            dsets = {}
            for node in f.walk_nodes('/' + genome, 'Array'):
                dsets[node.name] = node.read()

            n_genes, n_cells = dsets['shape']
            data = dsets['data']
            if dsets['data'].dtype == np.dtype('int32'):
                # Convert int32 counts to float32 in place: reinterpret the
                # buffer as float32, then overwrite it with the int values.
                data = dsets['data'].view('float32')
                data[:] = dsets['data']

            X = csr_matrix((data, dsets['indices'], dsets['indptr']),
                           shape=(n_cells, n_genes))
            genes = list(dsets['genes'].astype(str))
            # Sanity checks; note asserts are stripped under `python -O`.
            assert(len(genes) == n_genes)
            assert(len(genes) == X.shape[1])

        except tables.NoSuchNodeError:
            raise Exception('Genome %s does not exist in this file.' % genome)
        except KeyError:
            raise Exception('File is missing one or more required datasets.')

    return X, np.array(genes)
Example #4
Source File: process.py    From geosketch with MIT License 5 votes vote down vote up
def load_h5(fname, genome='mm10'):
    """Read a 10x Genomics .h5 count matrix and return (X, gene_names).

    `X` is a cells-by-genes `csr_matrix`; `gene_names` is a numpy array of
    strings. Raises Exception when the genome group or a required dataset
    is absent.
    """
    # Adapted from scanpy's read_10x_h5() method.
    with tables.open_file(str(fname), 'r') as f:
        try:
            arrays = {}
            for node in f.walk_nodes('/' + genome, 'Array'):
                arrays[node.name] = node.read()

            n_genes, n_cells = arrays['shape']
            values = arrays['data']

            if values.dtype == np.dtype('int32'):
                # In-place int32 -> float32 conversion: reinterpret the
                # buffer, then copy the integer values over it.
                values = arrays['data'].view('float32')
                values[:] = arrays['data']

            X = csr_matrix((values, arrays['indices'], arrays['indptr']),
                           shape=(n_cells, n_genes))
            genes = list(arrays['gene_names'].astype(str))
            assert(len(genes) == n_genes)
            assert(len(genes) == X.shape[1])

        except tables.NoSuchNodeError:
            raise Exception('Genome %s does not exist in this file.' % genome)
        except KeyError:
            raise Exception('File is missing one or more required datasets.')

    return X, np.array(genes)
Example #5
Source File: core.py    From pyiron with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def remove_child(self):
        """
        internal function to remove command that removes also child jobs.
        Do never use this command, since it will destroy the integrity of your project.

        Deletes the job's HDF5 group, its working directory, the (now empty)
        HDF5 file and matching *_hdf5 directory, and finally its database row.
        If the job still appears queued/running, its queue entry is deleted
        first.
        """
        # If the job was submitted to a queuing system and may still be
        # active, cancel the queue entry before deleting any data.
        if "server" in self.project_hdf5.list_nodes():
            server_hdf_dict = self.project_hdf5["server"]
            if (
                "qid" in server_hdf_dict.keys()
                and str(self.status) in ["submitted", "running", "collect"]
                and server_hdf_dict["qid"] is not None
            ):
                # presumably qid is the queuing-system job id — verify
                self.project._queue_delete_job(server_hdf_dict["qid"])
        # Remove this job's group from the parent HDF5 file along with its
        # working directory on disk.
        with self.project_hdf5.open("..") as hdf_parent:
            try:
                del hdf_parent[self.job_name]
                shutil.rmtree(str(self.working_directory))
            except (NoSuchNodeError, KeyError, OSError):
                # Best effort: the group/directory may already be gone.
                print(
                    "This group does not exist in the HDF5 file {}".format(
                        self.job_name
                    )
                )
        # If nothing else is stored in the HDF5 file, remove the file itself
        # and the associated <name>_hdf5 directory (only when empty).
        if self.project_hdf5.is_empty:
            if os.path.isfile(self.project_hdf5.file_name):
                os.remove(self.project_hdf5.file_name)
                dir_name = self.project_hdf5.file_name.split(".h5")[0] + "_hdf5"
                if os.path.isdir(dir_name):
                    os.rmdir(dir_name)
        # Finally drop the job's database record, if it has one.
        if self.job_id:
            self.project.db.delete_item(self.job_id)
Example #6
Source File: loader.py    From FRETBursts with GNU General Public License v2.0 5 votes vote down vote up
def _add_usALEX_specs(data, meas_specs):
    """Add us-ALEX parameters from `meas_specs` to `data`.

    Reads the mandatory `alex_period` and the optional `alex_offset`
    (defaulting to 0 with a warning when absent), then delegates loading
    of the D/A excitation period definitions.
    """
    try:
        alex_offset = meas_specs.alex_offset.read()
    except tables.NoSuchNodeError:
        # Optional field: fall back to zero when it is not in the file.
        log.warning('    No offset found, assuming offset = 0.')
        alex_offset = 0
    data.add(offset=alex_offset)
    data.add(alex_period=meas_specs.alex_period.read())
    _load_alex_periods_donor_acceptor(data, meas_specs)
Example #7
Source File: read_array.py    From seqc with GNU General Public License v2.0 5 votes vote down vote up
def load(cls, archive_name):
        """load a ReadArray from an hdf5 archive, note that ma_pos and ma_genes are
        discarded.

        :param str archive_name: name of a .h5 archive containing a saved ReadArray object
        :return ReadArray:
        """
        # Use a context manager so the HDF5 handle is always closed; the
        # previous version opened the file and never closed it (leaking the
        # handle, and on any read error leaving the file open).
        with tb.open_file(archive_name, mode='r') as f:
            data = f.root.data.read()

            try:
                # Newer archives store dense genes/positions arrays.
                f.get_node('/genes')
                genes = f.root.genes.read()
                positions = f.root.positions.read()
            except tb.NoSuchNodeError:
                # Older archives store CSR components; rebuild the sparse
                # matrices (genes and positions share indptr/indices).
                indptr = f.root.indptr.read()
                indices = f.root.indices.read()
                genes = f.root.gene_data.read()
                positions = f.root.positions_data.read()
                genes = csr_matrix((genes, indices, indptr))
                positions = csr_matrix((positions, indices, indptr))

        return cls(data, genes, positions)

    # TODO: document me