Python tables.UInt8Atom() Examples

The following are 11 code examples of tables.UInt8Atom(), drawn from open-source projects. The original project and source file for each example are noted in its header. You may also want to check out all available functions and classes of the tables module, or try the search function.
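For reference, tables.UInt8Atom() describes the type and default value of the atomic elements stored in a PyTables array. A minimal, self-contained sketch (the file and node names here are illustrative, not taken from the examples below):

import numpy as np
import tables

# A UInt8Atom describes 1-byte unsigned integer elements; dflt sets the fill value
atom = tables.UInt8Atom(dflt=0)
print(atom.dtype)   # uint8
print(atom.shape)   # () -- a scalar atom

# Store a small uint8 array in a compressed CArray
with tables.open_file('example.h5', mode='w') as h5f:
    filters = tables.Filters(complevel=5, complib='zlib')
    carr = h5f.create_carray(h5f.root, 'pixels', atom=atom,
                             shape=(4, 4), filters=filters)
    carr[:] = np.arange(16, dtype=np.uint8).reshape(4, 4)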
Example #1
Source File: bam2h5.py    From WASP with Apache License 2.0
def create_carray(h5f, chrom, data_type):
    if data_type == "uint8":
        atom = tables.UInt8Atom(dflt=0)
    elif data_type == "uint16":
        atom = tables.UInt16Atom(dflt=0)
    else:
        raise NotImplementedError("unsupported datatype %s" % data_type)

    zlib_filter = tables.Filters(complevel=1, complib="zlib")

    # create CArray for this chromosome
    shape = [chrom.length]
    carray = h5f.create_carray(h5f.root, chrom.name,
                               atom, shape, filters=zlib_filter)

    return carray 
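A hypothetical call might look like the following; the chrom object with name and length attributes is assumed here for illustration and is not shown in the WASP snippet above:

# Illustrative usage only: 'chrom' is assumed to expose .name and .length
h5f = tables.open_file("read_counts.h5", mode="w")
carray = create_carray(h5f, chrom, "uint8")
carray[:100] = 1  # write per-base values for the first 100 positions
h5f.close()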
Example #2
Source File: svhn.py    From batchup with MIT License
def fetch_svhn_extra(source_paths, target_path):
    extra_path = source_paths[0]

    print('Converting {} to HDF5 (compressed)...'.format(extra_path))
    f_out = tables.open_file(target_path, mode='w')
    g_out = f_out.create_group(f_out.root, 'svhn', 'SVHN data')
    filters = tables.Filters(complevel=9, complib='blosc')
    X_u8_arr = f_out.create_earray(
        g_out, 'extra_X_u8', tables.UInt8Atom(), (0, 3, 32, 32),
        filters=filters)
    y_arr = f_out.create_earray(
        g_out, 'extra_y', tables.Int32Atom(), (0,), filters=filters)

    # Load in the extra data Matlab file
    _insert_svhn_matlab_to_h5(X_u8_arr, y_arr, extra_path)

    f_out.close()

    return target_path 
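Reading the resulting file back follows the group and array names created above; a minimal sketch:

f_in = tables.open_file(target_path, mode='r')
X_u8 = f_in.root.svhn.extra_X_u8   # uint8 images, shape (N, 3, 32, 32)
y = f_in.root.svhn.extra_y         # int32 labels, shape (N,)
print(X_u8.shape, y.shape)
f_in.close()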
Example #3
Source File: test_hdf5.py    From attention-lvcsr with MIT License
def setUp(self):
        num_rows = 500
        filters = tables.Filters(complib='blosc', complevel=5)
        h5file = tables.open_file(
            'tmp.h5', mode='w', title='Test', filters=filters)
        group = h5file.create_group("/", 'Data')
        atom = tables.UInt8Atom()
        y = h5file.create_carray(group, 'y', atom=atom, title='Data targets',
                                 shape=(num_rows, 1), filters=filters)
        for i in range(num_rows):
            y[i] = i
        h5file.flush()
        h5file.close()
        self.dataset = PytablesDataset('tmp.h5', ('y',), 20, 500)
        self.dataset_default = PytablesDataset('tmp.h5', ('y',)) 
Example #4
Source File: compressVideo.py    From tierpsy-tracker with MIT License
def createImgGroup(fid, name, tot_frames, im_height, im_width, is_expandable=True):
    parentnode, _, name = name.rpartition('/')
    parentnode += '/'

    if is_expandable:
        img_dataset = fid.create_earray(
                        parentnode,
                        name,
                        atom=tables.UInt8Atom(),
                        shape=(0, im_height, im_width),
                        chunkshape=(1, im_height, im_width),
                        expectedrows=tot_frames,
                        filters=TABLE_FILTERS
                        )
    else:
        img_dataset = fid.create_carray(
                        parentnode,
                        name,
                        atom=tables.UInt8Atom(),
                        shape=(tot_frames, im_height, im_width),
                        filters=TABLE_FILTERS
                        )

    img_dataset._v_attrs["CLASS"] = np.string_("IMAGE")
    img_dataset._v_attrs["IMAGE_SUBCLASS"] = np.string_("IMAGE_GRAYSCALE")
    img_dataset._v_attrs["IMAGE_WHITE_IS_ZERO"] = np.array(0, dtype="uint8")
    img_dataset._v_attrs["DISPLAY_ORIGIN"] = np.string_("UL")  # not rotated
    img_dataset._v_attrs["IMAGE_VERSION"] = np.string_("1.2")

    return img_dataset 
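A hypothetical call, assuming an open PyTables file handle and that TABLE_FILTERS is a module-level tables.Filters instance in the original source (the node path and frame below are illustrative):

fid = tables.open_file('video.hdf5', mode='w')
masks = createImgGroup(fid, '/mask', tot_frames=1000,
                       im_height=480, im_width=640, is_expandable=True)
frame = np.zeros((480, 640), dtype=np.uint8)  # placeholder frame
masks.append(frame[None, ...])                # append one frame at a time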
Example #5
Source File: test_hdf5.py    From fuel with MIT License
def setUp(self):
        num_rows = 500
        filters = tables.Filters(complib='blosc', complevel=5)
        h5file = tables.open_file(
            'tmp.h5', mode='w', title='Test', filters=filters)
        group = h5file.create_group("/", 'Data')
        atom = tables.UInt8Atom()
        y = h5file.create_carray(group, 'y', atom=atom, title='Data targets',
                                 shape=(num_rows, 1), filters=filters)
        for i in range(num_rows):
            y[i] = i
        h5file.flush()
        h5file.close()
        self.dataset = PytablesDataset('tmp.h5', ('y',), 20, 500)
        self.dataset_default = PytablesDataset('tmp.h5', ('y',)) 
Example #6
Source File: data.py    From 3DUnetCNN with MIT License
def create_data_file(out_file, n_channels, n_samples, image_shape):
    hdf5_file = tables.open_file(out_file, mode='w')
    filters = tables.Filters(complevel=5, complib='blosc')
    data_shape = tuple([0, n_channels] + list(image_shape))
    truth_shape = tuple([0, 1] + list(image_shape))
    data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,
                                           filters=filters, expectedrows=n_samples)
    truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,
                                            filters=filters, expectedrows=n_samples)
    affine_storage = hdf5_file.create_earray(hdf5_file.root, 'affine', tables.Float32Atom(), shape=(0, 4, 4),
                                             filters=filters, expectedrows=n_samples)
    return hdf5_file, data_storage, truth_storage, affine_storage 
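For context, a hypothetical call for four-channel volumes of shape (144, 144, 144) might look like this (the numbers and file name are illustrative):

hdf5_file, data_storage, truth_storage, affine_storage = create_data_file(
    'image_data.h5', n_channels=4, n_samples=100, image_shape=(144, 144, 144))
# Samples are then appended with a leading axis of length 1, e.g.
# data_storage.append(subject_data[np.newaxis])  # shape (1, 4, 144, 144, 144)
hdf5_file.close()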
Example #7
Source File: data.py    From 3D-CNNs-for-Liver-Classification with Apache License 2.0
def create_data_file(out_file, n_channels, n_samples, image_shape):
    hdf5_file = tables.open_file(out_file, mode='w')
    filters = tables.Filters(complevel=5, complib='blosc')
    data_shape = tuple([0, n_channels] + list(image_shape))
    truth_shape = tuple([0, 1] + list(image_shape))
    data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,
                                           filters=filters, expectedrows=n_samples)
    truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,
                                            filters=filters, expectedrows=n_samples)
    affine_storage = hdf5_file.create_earray(hdf5_file.root, 'affine', tables.Float32Atom(), shape=(0, 4, 4),
                                             filters=filters, expectedrows=n_samples)
    return hdf5_file, data_storage, truth_storage, affine_storage 
Example #8
Source File: preprocess.py    From 3D-CNNs-for-Liver-Classification with Apache License 2.0
def create_data_file(out_file, n_channels, n_samples, image_shape):
    hdf5_file = tables.open_file(out_file, mode='w')
    filters = tables.Filters(complevel=5, complib='blosc')
    data_shape = tuple([0, n_channels] + list(image_shape))
    truth_shape = tuple([0, 1])
    data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,
                                           filters=filters, expectedrows=n_samples)
    truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,
                                            filters=filters, expectedrows=n_samples)
    return hdf5_file, data_storage, truth_storage 
Example #9
Source File: data.py    From Keras-Brats-Improved-Unet3d with MIT License
def create_data_file(out_file, n_channels, n_samples, image_shape):
    hdf5_file = tables.open_file(out_file, mode='w')
    filters = tables.Filters(complevel=5, complib='blosc')
    data_shape = tuple([0, n_channels] + list(image_shape))
    truth_shape = tuple([0, 1] + list(image_shape))
    data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,
                                           filters=filters, expectedrows=n_samples)
    truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,
                                            filters=filters, expectedrows=n_samples)
    affine_storage = hdf5_file.create_earray(hdf5_file.root, 'affine', tables.Float32Atom(), shape=(0, 4, 4),
                                             filters=filters, expectedrows=n_samples)
    return hdf5_file, data_storage, truth_storage, affine_storage 
Example #10
Source File: hdf5io.py    From deepdish with BSD 3-Clause "New" or "Revised" License
def _save_ndarray(handler, group, name, x, filters=None):
    if np.issubdtype(x.dtype, np.unicode_):
        # Convert unicode strings to pure byte arrays
        strtype = b'unicode'
        itemsize = x.itemsize // 4
        atom = tables.UInt8Atom()
        x = x.view(dtype=np.uint8)
    elif np.issubdtype(x.dtype, np.string_):
        strtype = b'ascii'
        itemsize = x.itemsize
        atom = tables.StringAtom(itemsize)
    elif x.dtype == np.object:
        # Not supported by HDF5, force pickling
        _save_pickled(handler, group, x, name=name)
        return
    else:
        atom = tables.Atom.from_dtype(x.dtype)
        strtype = None
        itemsize = None

    if x.ndim > 0 and np.min(x.shape) == 0:
        sh = np.array(x.shape)
        atom0 = tables.Atom.from_dtype(np.dtype(np.int64))
        node = handler.create_array(group, name, atom=atom0,
                                    shape=(sh.size,))
        node._v_attrs.zeroarray_dtype = np.dtype(x.dtype).str.encode('ascii')
        node[:] = sh
        return

    if x.ndim == 0 and len(x.shape) == 0:
        # This is a numpy array scalar. We will store it as a regular scalar
        # instead, which means it will be unpacked as a numpy scalar (not numpy
        # array scalar)
        setattr(group._v_attrs, name, x[()])
        return

    # For small arrays, compression actually leads to larger files, so we are
    # setting a threshold here. The threshold has been set through
    # experimentation.
    if filters is not None and x.size > 300:
        node = handler.create_carray(group, name, atom=atom,
                                     shape=x.shape,
                                     chunkshape=None,
                                     filters=filters)
    else:
        node = handler.create_array(group, name, atom=atom,
                                    shape=x.shape)
    if strtype is not None:
        node._v_attrs.strtype = strtype
        node._v_attrs.itemsize = itemsize
    node[:] = x 
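Note that the unicode branch above stores the raw bytes of the array (numpy unicode uses 4 bytes per character) together with strtype and itemsize attributes. A minimal sketch of reversing the trick when loading, not the actual deepdish reader:

raw = node[:]                              # the stored uint8 view
n_chars = node._v_attrs.itemsize           # characters per element, saved above
strings = raw.view('U{}'.format(n_chars))  # reinterpret bytes as unicode strings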
Example #11
Source File: prepare_synsigns.py    From self-ensemble-visual-domain-adapt with MIT License
def prepare():
    import os
    import sys
    import numpy as np
    import tables
    import tqdm
    import domain_datasets
    import cv2

    synsigns_path = domain_datasets.get_data_dir('syn_signs')
    data_path = os.path.join(synsigns_path, 'synthetic_data')

    labels_path = os.path.join(data_path, 'train_labelling.txt')

    if not os.path.exists(labels_path):
        print('Labels path {} does not exist'.format(labels_path))
        sys.exit(0)

    # Open the file that lists the image files along with their ground truth class
    lines = [line.strip() for line in open(labels_path, 'r').readlines()]
    lines = [line for line in lines if line != '']

    output_path = os.path.join(synsigns_path, 'syn_signs.h5')
    print('Creating {}...'.format(output_path))
    f_out = tables.open_file(output_path, mode='w')
    g_out = f_out.create_group(f_out.root, 'syn_signs', 'Syn-Signs data')
    filters = tables.Filters(complevel=9, complib='blosc')
    X_u8_arr = f_out.create_earray(
        g_out, 'X_u8', tables.UInt8Atom(), (0, 3, 40, 40), expectedrows=len(lines),
        filters=filters)

    y = []
    for line in tqdm.tqdm(lines):
        image_filename, gt, _ = line.split()
        image_path = os.path.join(data_path, image_filename)

        if not os.path.exists(image_path):
            print('Could not find image file {} mentioned in annotations'.format(image_path))
            return
        image_data = cv2.imread(image_path)[:, :, ::-1]

        X_u8_arr.append(image_data.transpose(2, 0, 1)[None, ...])
        y.append(int(gt))

    y = np.array(y, dtype=np.int32)
    f_out.create_array(g_out, 'y', y)

    print('X.shape={}'.format(X_u8_arr.shape))
    print('y.shape={}'.format(y.shape))

    f_out.close()